Bug #261992 - Replace the EbuildBuildDir.clean() method with a clean_log()
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing output to the tty too frequently wastes CPU time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for user input.
147         The input is checked against the responses and the first match is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
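# Example: userquery("Continue?") prints "Continue? [Yes/No] " and returns "Yes"
# for an empty response or any case-insensitive prefix such as "y", "No" for "n";
# EOF or Ctrl-C prints "Interrupted." and exits via sys.exit(1).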
184
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge", "version",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
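# Each record is appended to /var/log/emerge.log under a lock in the form
# "<unix timestamp>: <message>"; write failures are only reported on stderr
# when secpass >= 1.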
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
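# Examples: format_size(1) == "1 kB" (sizes are rounded up to the next kB) and
# format_size(2500000) == "2,442 kB"; string arguments are returned unchanged.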
292
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
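# Probe order: `gcc-config -c` (accepted only if the reported profile starts with
# ${CHOST}-), then `${CHOST}-gcc -dumpversion`, then plain `gcc -dumpversion`.
# A typical result is "gcc-4.3.2"; "[unavailable]" is returned if all probes fail.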
324
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of whether it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
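# Examples: myopts=["--update", "--deep"] yields set(["recurse", "selective", "deep"]);
# myopts=["--emptytree"] yields set(["recurse", "empty"]) since "empty" discards
# "selective"; myaction="remove" always yields set(["recurse", "remove", "complete"]).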
389
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virtual matches are unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtual expansion
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
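# Typical use: s = search(root_config, spinner, searchdesc, verbose, usepkg,
# usepkgonly); s.execute(searchkey); s.output().  A searchkey prefixed with "%"
# is treated as a regular expression, and a "@" prefix matches against the full
# ${CATEGORY}/${PN} instead of just ${PN}.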
751
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
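# Illustrative example (hypothetical package): for the argument atom
# "=dev-lang/python-2.5.4" where several SLOTs of dev-lang/python are available,
# the slot atom "dev-lang/python:2.5" is returned for the world file; for an
# unslotted package only "dev-lang/python" would be returned, and None means
# nothing new needs to be added to world.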
857
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
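# Example (hypothetical subclass):
#     class Job(SlotObject):
#         __slots__ = ("cpv", "root")
# Job(cpv="app-misc/foo-1.0", root="/") assigns each declared slot from the
# keyword arguments (missing slots default to None), and copy() returns a new
# instance with every slot value duplicated.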
903
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
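# The rich comparisons above all delegate to __int__(), so priorities order by
# their integer value; e.g. DepPriority() == 0 holds, and an UnmergeDepPriority
# with runtime=True (0) compares greater than one with only buildtime=True (-2).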
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
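# The returned set contains every Package in the graph that belongs to the system
# set, plus all packages reachable from them through runtime or runtime_post
# dependency edges; build-time-only edges and uninstall operations are skipped.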
1157
1158 class FakeVartree(portage.vartree):
1159         """This implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do its
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229         # Initialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
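# The two helpers above are used together by FakeVartree._aux_get_wrapper():
# grab_global_updates() parses profiles/updates once, and perform_global_updates()
# then rewrites the *DEPEND entries of a single package in the in-memory dbapi.
# A commented sketch of that wiring (portdb and fake_vardb are assumed to be an
# existing portdbapi and PackageVirtualDbapi instance, respectively):
#
#       upd_commands = grab_global_updates(portdb.porttree_root)
#       for cpv in fake_vardb.cpv_all():
#               perform_global_updates(cpv, fake_vardb, upd_commands)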
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. An InvalidDependString exception from
1383         an invalid LICENSE is caught and the package is treated as not visible.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if not pkg.installed:
1391                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if not pkg.installed:
1419                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440         if metadata is None:
1441                 mreasons = ["corruption"]
1442         else:
1443                 pkg = Package(type_name=pkg_type, root_config=root_config,
1444                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1445                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446         return metadata, mreasons
1447
1448 def show_masked_packages(masked_packages):
1449         shown_licenses = set()
1450         shown_comments = set()
1451         # There may be both an ebuild and a binary. Only
1452         # show one of them to avoid redundant output.
1453         shown_cpvs = set()
1454         have_eapi_mask = False
1455         for (root_config, pkgsettings, cpv,
1456                 metadata, mreasons) in masked_packages:
1457                 if cpv in shown_cpvs:
1458                         continue
1459                 shown_cpvs.add(cpv)
1460                 comment, filename = None, None
1461                 if "package.mask" in mreasons:
1462                         comment, filename = \
1463                                 portage.getmaskingreason(
1464                                 cpv, metadata=metadata,
1465                                 settings=pkgsettings,
1466                                 portdb=root_config.trees["porttree"].dbapi,
1467                                 return_location=True)
1468                 missing_licenses = []
1469                 if metadata:
1470                         if not portage.eapi_is_supported(metadata["EAPI"]):
1471                                 have_eapi_mask = True
1472                         try:
1473                                 missing_licenses = \
1474                                         pkgsettings._getMissingLicenses(
1475                                                 cpv, metadata)
1476                         except portage.exception.InvalidDependString:
1477                                 # This will have already been reported
1478                                 # above via mreasons.
1479                                 pass
1480
1481                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482                 if comment and comment not in shown_comments:
1483                         print filename+":"
1484                         print comment
1485                         shown_comments.add(comment)
1486                 portdb = root_config.trees["porttree"].dbapi
1487                 for l in missing_licenses:
1488                         l_path = portdb.findLicensePath(l)
1489                         if l in shown_licenses:
1490                                 continue
1491                         msg = ("A copy of the '%s' license" + \
1492                         " is located at '%s'.") % (l, l_path)
1493                         print msg
1494                         print
1495                         shown_licenses.add(l)
1496         return have_eapi_mask
1497
1498 class Task(SlotObject):
1499         __slots__ = ("_hash_key", "_hash_value")
1500
1501         def _get_hash_key(self):
1502                 hash_key = getattr(self, "_hash_key", None)
1503                 if hash_key is None:
1504                         raise NotImplementedError(self)
1505                 return hash_key
1506
1507         def __eq__(self, other):
1508                 return self._get_hash_key() == other
1509
1510         def __ne__(self, other):
1511                 return self._get_hash_key() != other
1512
1513         def __hash__(self):
1514                 hash_value = getattr(self, "_hash_value", None)
1515                 if hash_value is None:
1516                         self._hash_value = hash(self._get_hash_key())
1517                 return self._hash_value
1518
1519         def __len__(self):
1520                 return len(self._get_hash_key())
1521
1522         def __getitem__(self, key):
1523                 return self._get_hash_key()[key]
1524
1525         def __iter__(self):
1526                 return iter(self._get_hash_key())
1527
1528         def __contains__(self, key):
1529                 return key in self._get_hash_key()
1530
1531         def __str__(self):
1532                 return str(self._get_hash_key())
1533
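# Task compares and hashes by its hash key tuple, so a task instance and the
# equivalent tuple are interchangeable as dict/set keys. A minimal sketch with
# a hypothetical subclass (not part of portage):
class _ExampleTask(Task):

        __hash__ = Task.__hash__
        __slots__ = ("name",)

        def _get_hash_key(self):
                hash_key = getattr(self, "_hash_key", None)
                if hash_key is None:
                        self._hash_key = ("example", self.name)
                return self._hash_key

# t = _ExampleTask(name="foo")
# t == ("example", "foo")           -> True
# ("example", "foo") in set([t])    -> True
# t[0]                              -> "example" (via __getitem__)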
1534 class Blocker(Task):
1535
1536         __hash__ = Task.__hash__
1537         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1538
1539         def __init__(self, **kwargs):
1540                 Task.__init__(self, **kwargs)
1541                 self.cp = portage.dep_getkey(self.atom)
1542
1543         def _get_hash_key(self):
1544                 hash_key = getattr(self, "_hash_key", None)
1545                 if hash_key is None:
1546                         self._hash_key = \
1547                                 ("blocks", self.root, self.atom, self.eapi)
1548                 return self._hash_key
1549
1550 class Package(Task):
1551
1552         __hash__ = Task.__hash__
1553         __slots__ = ("built", "cpv", "depth",
1554                 "installed", "metadata", "onlydeps", "operation",
1555                 "root_config", "type_name",
1556                 "category", "counter", "cp", "cpv_split",
1557                 "inherited", "iuse", "mtime",
1558                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1559
1560         metadata_keys = [
1561                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562                 "INHERITED", "IUSE", "KEYWORDS",
1563                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565
1566         def __init__(self, **kwargs):
1567                 Task.__init__(self, **kwargs)
1568                 self.root = self.root_config.root
1569                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570                 self.cp = portage.cpv_getkey(self.cpv)
1571                 slot = self.slot
1572                 if not slot:
1573                         # Avoid an InvalidAtom exception when creating slot_atom.
1574                         # This package instance will be masked due to empty SLOT.
1575                         slot = '0'
1576                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1577                 self.category, self.pf = portage.catsplit(self.cpv)
1578                 self.cpv_split = portage.catpkgsplit(self.cpv)
1579                 self.pv_split = self.cpv_split[1:]
1580
1581         class _use(object):
1582
1583                 __slots__ = ("__weakref__", "enabled")
1584
1585                 def __init__(self, use):
1586                         self.enabled = frozenset(use)
1587
1588         class _iuse(object):
1589
1590                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1591
1592                 def __init__(self, tokens, iuse_implicit):
1593                         self.tokens = tuple(tokens)
1594                         self.iuse_implicit = iuse_implicit
1595                         enabled = []
1596                         disabled = []
1597                         other = []
1598                         for x in tokens:
1599                                 prefix = x[:1]
1600                                 if prefix == "+":
1601                                         enabled.append(x[1:])
1602                                 elif prefix == "-":
1603                                         disabled.append(x[1:])
1604                                 else:
1605                                         other.append(x)
1606                         self.enabled = frozenset(enabled)
1607                         self.disabled = frozenset(disabled)
1608                         self.all = frozenset(chain(enabled, disabled, other))
1609
1610                 def __getattribute__(self, name):
1611                         if name == "regex":
1612                                 try:
1613                                         return object.__getattribute__(self, "regex")
1614                                 except AttributeError:
1615                                         all = object.__getattribute__(self, "all")
1616                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1617                                         # Escape anything except ".*" which is supposed
1618                                         # to pass through from _get_implicit_iuse()
1619                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1620                                         regex = "^(%s)$" % "|".join(regex)
1621                                         regex = regex.replace("\\.\\*", ".*")
1622                                         self.regex = re.compile(regex)
1623                         return object.__getattribute__(self, name)
1624
1625         def _get_hash_key(self):
1626                 hash_key = getattr(self, "_hash_key", None)
1627                 if hash_key is None:
1628                         if self.operation is None:
1629                                 self.operation = "merge"
1630                                 if self.onlydeps or self.installed:
1631                                         self.operation = "nomerge"
1632                         self._hash_key = \
1633                                 (self.type_name, self.root, self.cpv, self.operation)
1634                 return self._hash_key
1635
1636         def __lt__(self, other):
1637                 if other.cp != self.cp:
1638                         return False
1639                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1640                         return True
1641                 return False
1642
1643         def __le__(self, other):
1644                 if other.cp != self.cp:
1645                         return False
1646                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1647                         return True
1648                 return False
1649
1650         def __gt__(self, other):
1651                 if other.cp != self.cp:
1652                         return False
1653                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1654                         return True
1655                 return False
1656
1657         def __ge__(self, other):
1658                 if other.cp != self.cp:
1659                         return False
1660                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1661                         return True
1662                 return False
1663
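# The rich comparisons above order packages by version only when both sides
# share the same category/package name (cp); for different cp values every
# comparison returns False. Illustrative (commented) expectations, assuming
# foo_1 and foo_2 are Package instances of the same cp with versions 1.0 and
# 2.0, and bar_1 has a different cp:
#
#       foo_1 < foo_2   -> True   (portage.pkgcmp on the version parts)
#       foo_2 < foo_1   -> False
#       foo_1 < bar_1   -> False  (different cp)
#       bar_1 < foo_1   -> False  (so sorting mixed-cp lists this way is undefined)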
1664 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1665         if not x.startswith("UNUSED_"))
1666 _all_metadata_keys.discard("CDEPEND")
1667 _all_metadata_keys.update(Package.metadata_keys)
1668
1669 from portage.cache.mappings import slot_dict_class
1670 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1671
1672 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1673         """
1674         Detect metadata updates and synchronize Package attributes.
1675         """
1676
1677         __slots__ = ("_pkg",)
1678         _wrapped_keys = frozenset(
1679                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1680
1681         def __init__(self, pkg, metadata):
1682                 _PackageMetadataWrapperBase.__init__(self)
1683                 self._pkg = pkg
1684                 self.update(metadata)
1685
1686         def __setitem__(self, k, v):
1687                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1688                 if k in self._wrapped_keys:
1689                         getattr(self, "_set_" + k.lower())(k, v)
1690
1691         def _set_inherited(self, k, v):
1692                 if isinstance(v, basestring):
1693                         v = frozenset(v.split())
1694                 self._pkg.inherited = v
1695
1696         def _set_iuse(self, k, v):
1697                 self._pkg.iuse = self._pkg._iuse(
1698                         v.split(), self._pkg.root_config.iuse_implicit)
1699
1700         def _set_slot(self, k, v):
1701                 self._pkg.slot = v
1702
1703         def _set_use(self, k, v):
1704                 self._pkg.use = self._pkg._use(v.split())
1705
1706         def _set_counter(self, k, v):
1707                 if isinstance(v, basestring):
1708                         try:
1709                                 v = long(v.strip())
1710                         except ValueError:
1711                                 v = 0
1712                 self._pkg.counter = v
1713
1714         def _set__mtime_(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.mtime = v
1721
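# Writing to one of the _wrapped_keys above dispatches to the matching _set_*
# handler, which keeps the owning Package's attributes in sync with the raw
# metadata strings. An illustrative (commented) sequence, assuming pkg is an
# existing Package instance:
#
#       pkg.metadata["SLOT"] = "2"         # _set_slot():    pkg.slot == "2"
#       pkg.metadata["COUNTER"] = "42"     # _set_counter(): pkg.counter == 42
#       pkg.metadata["USE"] = "foo bar"    # _set_use():     pkg.use.enabled == frozenset(["foo", "bar"])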
1722 class EbuildFetchonly(SlotObject):
1723
1724         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1725
1726         def execute(self):
1727                 settings = self.settings
1728                 pkg = self.pkg
1729                 portdb = pkg.root_config.trees["porttree"].dbapi
1730                 ebuild_path = portdb.findname(pkg.cpv)
1731                 settings.setcpv(pkg)
1732                 debug = settings.get("PORTAGE_DEBUG") == "1"
1733                 use_cache = 1 # always true
1734                 portage.doebuild_environment(ebuild_path, "fetch",
1735                         settings["ROOT"], settings, debug, use_cache, portdb)
1736                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1737
1738                 if restrict_fetch:
1739                         rval = self._execute_with_builddir()
1740                 else:
1741                         rval = portage.doebuild(ebuild_path, "fetch",
1742                                 settings["ROOT"], settings, debug=debug,
1743                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1744                                 mydbapi=portdb, tree="porttree")
1745
1746                         if rval != os.EX_OK:
1747                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1748                                 eerror(msg, phase="unpack", key=pkg.cpv)
1749
1750                 return rval
1751
1752         def _execute_with_builddir(self):
1753                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR in order to
1754                 # ensure a sane $PWD (bug #239560) and to store elog
1755                 # messages. Use a private temp directory so that the
1756                 # main one does not have to be locked.
1757                 settings = self.settings
1758                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1759                 from tempfile import mkdtemp
1760                 try:
1761                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1762                 except OSError, e:
1763                         if e.errno != portage.exception.PermissionDenied.errno:
1764                                 raise
1765                         raise portage.exception.PermissionDenied(global_tmpdir)
1766                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1767                 settings.backup_changes("PORTAGE_TMPDIR")
1768                 try:
1769                         retval = self._execute()
1770                 finally:
1771                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1772                         settings.backup_changes("PORTAGE_TMPDIR")
1773                         shutil.rmtree(private_tmpdir)
1774                 return retval
1775
1776         def _execute(self):
1777                 settings = self.settings
1778                 pkg = self.pkg
1779                 root_config = pkg.root_config
1780                 portdb = root_config.trees["porttree"].dbapi
1781                 ebuild_path = portdb.findname(pkg.cpv)
1782                 debug = settings.get("PORTAGE_DEBUG") == "1"
1783                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1784
1785                 retval = portage.doebuild(ebuild_path, "fetch",
1786                         self.settings["ROOT"], self.settings, debug=debug,
1787                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1788                         mydbapi=portdb, tree="porttree")
1789
1790                 if retval != os.EX_OK:
1791                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1792                         eerror(msg, phase="unpack", key=pkg.cpv)
1793
1794                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1795                 return retval
1796
1797 class PollConstants(object):
1798
1799         """
1800         Provides POLL* constants that are equivalent to those from the
1801         select module, for use by PollSelectAdapter.
1802         """
1803
1804         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1805         v = 1
1806         for k in names:
1807                 locals()[k] = getattr(select, k, v)
1808                 v *= 2
1809         del k, v
1810
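# The class body above copies each POLL* value from the select module when it
# exists and otherwise assigns a distinct power-of-two fallback, so bitmask
# arithmetic on poll events works even on platforms without select.poll().
# A small illustrative helper (hypothetical, not part of portage):
def _describe_poll_event(event):
        """Name the PollConstants bits set in a poll event mask."""
        names = []
        for name in PollConstants.names:
                if event & getattr(PollConstants, name):
                        names.append(name)
        return "|".join(names) or "NONE"

# _describe_poll_event(PollConstants.POLLIN | PollConstants.POLLHUP)
#       -> "POLLIN|POLLHUP"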
1811 class AsynchronousTask(SlotObject):
1812         """
1813         Subclasses override _wait() and _poll() so that calls
1814         to public methods can be wrapped for implementing
1815         hooks such as exit listener notification.
1816
1817         Subclasses should call self.wait() to notify exit listeners after
1818         the task is complete and self.returncode has been set.
1819         """
1820
1821         __slots__ = ("background", "cancelled", "returncode") + \
1822                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1823
1824         def start(self):
1825                 """
1826                 Start an asynchronous task and then return as soon as possible.
1827                 """
1828                 self._start_hook()
1829                 self._start()
1830
1831         def _start(self):
1832                 raise NotImplementedError(self)
1833
1834         def isAlive(self):
1835                 return self.returncode is None
1836
1837         def poll(self):
1838                 self._wait_hook()
1839                 return self._poll()
1840
1841         def _poll(self):
1842                 return self.returncode
1843
1844         def wait(self):
1845                 if self.returncode is None:
1846                         self._wait()
1847                 self._wait_hook()
1848                 return self.returncode
1849
1850         def _wait(self):
1851                 return self.returncode
1852
1853         def cancel(self):
1854                 self.cancelled = True
1855                 self.wait()
1856
1857         def addStartListener(self, f):
1858                 """
1859                 The function will be called with one argument, a reference to self.
1860                 """
1861                 if self._start_listeners is None:
1862                         self._start_listeners = []
1863                 self._start_listeners.append(f)
1864
1865         def removeStartListener(self, f):
1866                 if self._start_listeners is None:
1867                         return
1868                 self._start_listeners.remove(f)
1869
1870         def _start_hook(self):
1871                 if self._start_listeners is not None:
1872                         start_listeners = self._start_listeners
1873                         self._start_listeners = None
1874
1875                         for f in start_listeners:
1876                                 f(self)
1877
1878         def addExitListener(self, f):
1879                 """
1880                 The function will be called with one argument, a reference to self.
1881                 """
1882                 if self._exit_listeners is None:
1883                         self._exit_listeners = []
1884                 self._exit_listeners.append(f)
1885
1886         def removeExitListener(self, f):
1887                 if self._exit_listeners is None:
1888                         if self._exit_listener_stack is not None:
1889                                 self._exit_listener_stack.remove(f)
1890                         return
1891                 self._exit_listeners.remove(f)
1892
1893         def _wait_hook(self):
1894                 """
1895                 Call this method after the task completes, just before returning
1896                 the returncode from wait() or poll(). This hook is
1897                 used to trigger exit listeners when the returncode first
1898                 becomes available.
1899                 """
1900                 if self.returncode is not None and \
1901                         self._exit_listeners is not None:
1902
1903                         # This prevents recursion, in case one of the
1904                         # exit handlers triggers this method again by
1905                         # calling wait(). Use a stack that gives
1906                         # removeExitListener() an opportunity to consume
1907                         # listeners from the stack, before they can get
1908                         # called below. This is necessary because a call
1909                         # to one exit listener may result in a call to
1910                         # removeExitListener() for another listener on
1911                         # the stack. That listener needs to be removed
1912                         # from the stack since it would be inconsistent
1913                         # to call it after it has been been passed into
1914                         # to call it after it has been passed into
1915                         self._exit_listener_stack = self._exit_listeners
1916                         self._exit_listeners = None
1917
1918                         self._exit_listener_stack.reverse()
1919                         while self._exit_listener_stack:
1920                                 self._exit_listener_stack.pop()(self)
1921
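# A minimal subclass sketch (hypothetical, not part of portage) showing the
# contract described in the AsynchronousTask docstring: _start() performs the
# work, the returncode is set, and wait() fires registered exit listeners
# exactly once.
class _ImmediateTask(AsynchronousTask):
        """Example task that finishes as soon as it is started."""

        __slots__ = ()

        def _start(self):
                # A real subclass would begin asynchronous work here and set
                # self.returncode later; this example completes immediately.
                self.returncode = os.EX_OK
                self.wait()

# task = _ImmediateTask()
# task.addExitListener(lambda t: writemsg("task done: %s\n" % t.returncode))
# task.start()          # fires the listener via _wait_hook()
# task.wait()           # -> os.EX_OK; the listener is not called again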
1922 class AbstractPollTask(AsynchronousTask):
1923
1924         __slots__ = ("scheduler",) + \
1925                 ("_registered",)
1926
1927         _bufsize = 4096
1928         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1929         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1930                 _exceptional_events
1931
1932         def _unregister(self):
1933                 raise NotImplementedError(self)
1934
1935         def _unregister_if_appropriate(self, event):
1936                 if self._registered:
1937                         if event & self._exceptional_events:
1938                                 self._unregister()
1939                                 self.cancel()
1940                         elif event & PollConstants.POLLHUP:
1941                                 self._unregister()
1942                                 self.wait()
1943
1944 class PipeReader(AbstractPollTask):
1945
1946         """
1947         Reads output from one or more files and saves it in memory,
1948         for retrieval via the getvalue() method. This is driven by
1949         the scheduler's poll() loop, so it runs entirely within the
1950         current process.
1951         """
1952
1953         __slots__ = ("input_files",) + \
1954                 ("_read_data", "_reg_ids")
1955
1956         def _start(self):
1957                 self._reg_ids = set()
1958                 self._read_data = []
1959                 for k, f in self.input_files.iteritems():
1960                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1961                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1962                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1963                                 self._registered_events, self._output_handler))
1964                 self._registered = True
1965
1966         def isAlive(self):
1967                 return self._registered
1968
1969         def cancel(self):
1970                 if self.returncode is None:
1971                         self.returncode = 1
1972                         self.cancelled = True
1973                 self.wait()
1974
1975         def _wait(self):
1976                 if self.returncode is not None:
1977                         return self.returncode
1978
1979                 if self._registered:
1980                         self.scheduler.schedule(self._reg_ids)
1981                         self._unregister()
1982
1983                 self.returncode = os.EX_OK
1984                 return self.returncode
1985
1986         def getvalue(self):
1987                 """Retrieve the entire contents"""
1988                 if sys.hexversion >= 0x3000000:
1989                         return bytes().join(self._read_data)
1990                 return "".join(self._read_data)
1991
1992         def close(self):
1993                 """Free the memory buffer."""
1994                 self._read_data = None
1995
1996         def _output_handler(self, fd, event):
1997
1998                 if event & PollConstants.POLLIN:
1999
2000                         for f in self.input_files.itervalues():
2001                                 if fd == f.fileno():
2002                                         break
2003
2004                         buf = array.array('B')
2005                         try:
2006                                 buf.fromfile(f, self._bufsize)
2007                         except EOFError:
2008                                 pass
2009
2010                         if buf:
2011                                 self._read_data.append(buf.tostring())
2012                         else:
2013                                 self._unregister()
2014                                 self.wait()
2015
2016                 self._unregister_if_appropriate(event)
2017                 return self._registered
2018
2019         def _unregister(self):
2020                 """
2021                 Unregister from the scheduler and close open files.
2022                 """
2023
2024                 self._registered = False
2025
2026                 if self._reg_ids is not None:
2027                         for reg_id in self._reg_ids:
2028                                 self.scheduler.unregister(reg_id)
2029                         self._reg_ids = None
2030
2031                 if self.input_files is not None:
2032                         for f in self.input_files.itervalues():
2033                                 f.close()
2034                         self.input_files = None
2035
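# PipeReader is driven by the scheduler's poll loop, so a complete example
# needs a scheduler; the non-blocking read that _start()/_output_handler()
# perform can be shown in isolation, though. An illustrative sketch (not a
# portage API):
def _drain_nonblocking(f, bufsize=4096):
        """Switch a file to non-blocking mode and read whatever is currently
        buffered, the way PipeReader reads per POLLIN event."""
        fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
        buf = array.array('B')
        try:
                buf.fromfile(f, bufsize)
        except (EOFError, IOError):
                # EOFError: fewer than bufsize bytes were available.
                # IOError (EAGAIN): nothing was buffered at all.
                pass
        return buf.tostring()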
2036 class CompositeTask(AsynchronousTask):
2037
2038         __slots__ = ("scheduler",) + ("_current_task",)
2039
2040         def isAlive(self):
2041                 return self._current_task is not None
2042
2043         def cancel(self):
2044                 self.cancelled = True
2045                 if self._current_task is not None:
2046                         self._current_task.cancel()
2047
2048         def _poll(self):
2049                 """
2050                 This does a loop calling self._current_task.poll()
2051                 repeatedly as long as the value of self._current_task
2052                 keeps changing. It calls poll() a maximum of one time
2053                 for a given self._current_task instance. This is useful
2054                 since calling poll() on a task can trigger an advance to
2055                 the next task, which can eventually lead to the returncode
2056                 being set in cases where polling only a single task would
2057                 not have the same effect.
2058                 """
2059
2060                 prev = None
2061                 while True:
2062                         task = self._current_task
2063                         if task is None or task is prev:
2064                                 # don't poll the same task more than once
2065                                 break
2066                         task.poll()
2067                         prev = task
2068
2069                 return self.returncode
2070
2071         def _wait(self):
2072
2073                 prev = None
2074                 while True:
2075                         task = self._current_task
2076                         if task is None:
2077                                 # no current task; nothing left to wait for
2078                                 break
2079                         if task is prev:
2080                                 # Before the task.wait() method returned, an exit
2081                                 # listener should have set self._current_task to either
2082                                 # a different task or None. Something is wrong.
2083                                 raise AssertionError("self._current_task has not " + \
2084                                         "changed since calling wait", self, task)
2085                         task.wait()
2086                         prev = task
2087
2088                 return self.returncode
2089
2090         def _assert_current(self, task):
2091                 """
2092                 Raises an AssertionError if the given task is not the
2093                 same one as self._current_task. This can be useful
2094                 for detecting bugs.
2095                 """
2096                 if task is not self._current_task:
2097                         raise AssertionError("Unrecognized task: %s" % (task,))
2098
2099         def _default_exit(self, task):
2100                 """
2101                 Calls _assert_current() on the given task and then sets the
2102                 composite returncode attribute if task.returncode != os.EX_OK.
2103                 If the task failed then self._current_task will be set to None.
2104                 Subclasses can use this as a generic task exit callback.
2105
2106                 @rtype: int
2107                 @returns: The task.returncode attribute.
2108                 """
2109                 self._assert_current(task)
2110                 if task.returncode != os.EX_OK:
2111                         self.returncode = task.returncode
2112                         self._current_task = None
2113                 return task.returncode
2114
2115         def _final_exit(self, task):
2116                 """
2117                 Assumes that task is the final task of this composite task.
2118                 Calls _default_exit() and sets self.returncode to the task's
2119                 returncode and sets self._current_task to None.
2120                 """
2121                 self._default_exit(task)
2122                 self._current_task = None
2123                 self.returncode = task.returncode
2124                 return self.returncode
2125
2126         def _default_final_exit(self, task):
2127                 """
2128                 This calls _final_exit() and then wait().
2129
2130                 Subclasses can use this as a generic final task exit callback.
2131
2132                 """
2133                 self._final_exit(task)
2134                 return self.wait()
2135
2136         def _start_task(self, task, exit_handler):
2137                 """
2138                 Register exit handler for the given task, set it
2139                 as self._current_task, and call task.start().
2140
2141                 Subclasses can use this as a generic way to start
2142                 a task.
2143
2144                 """
2145                 task.addExitListener(exit_handler)
2146                 self._current_task = task
2147                 task.start()
2148
2149 class TaskSequence(CompositeTask):
2150         """
2151         A collection of tasks that executes sequentially. Each task
2152         must have an addExitListener() method that can be used as
2153         a means to trigger movement from one task to the next.
2154         """
2155
2156         __slots__ = ("_task_queue",)
2157
2158         def __init__(self, **kwargs):
2159                 AsynchronousTask.__init__(self, **kwargs)
2160                 self._task_queue = deque()
2161
2162         def add(self, task):
2163                 self._task_queue.append(task)
2164
2165         def _start(self):
2166                 self._start_next_task()
2167
2168         def cancel(self):
2169                 self._task_queue.clear()
2170                 CompositeTask.cancel(self)
2171
2172         def _start_next_task(self):
2173                 self._start_task(self._task_queue.popleft(),
2174                         self._task_exit_handler)
2175
2176         def _task_exit_handler(self, task):
2177                 if self._default_exit(task) != os.EX_OK:
2178                         self.wait()
2179                 elif self._task_queue:
2180                         self._start_next_task()
2181                 else:
2182                         self._final_exit(task)
2183                         self.wait()
2184
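# Illustrative usage sketch (the scheduler, pkg, settings, config_pool and
# on_done callback are assumed to exist elsewhere): each queued task's exit
# listener advances the sequence, so the tasks run strictly one after another.
#
#       seq = TaskSequence(scheduler=scheduler)
#       seq.add(EbuildFetcher(config_pool=config_pool, pkg=pkg,
#               scheduler=scheduler))
#       seq.add(MiscFunctionsProcess(commands=["clean"], phase="clean",
#               pkg=pkg, scheduler=scheduler, settings=settings))
#       seq.addExitListener(on_done)
#       seq.start()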
2185 class SubProcess(AbstractPollTask):
2186
2187         __slots__ = ("pid",) + \
2188                 ("_files", "_reg_id")
2189
2190         # A file descriptor is required for the scheduler to monitor changes from
2191         # inside a poll() loop. When logging is not enabled, create a pipe just to
2192         # serve this purpose alone.
2193         _dummy_pipe_fd = 9
2194
2195         def _poll(self):
2196                 if self.returncode is not None:
2197                         return self.returncode
2198                 if self.pid is None:
2199                         return self.returncode
2200                 if self._registered:
2201                         return self.returncode
2202
2203                 try:
2204                         retval = os.waitpid(self.pid, os.WNOHANG)
2205                 except OSError, e:
2206                         if e.errno != errno.ECHILD:
2207                                 raise
2208                         del e
2209                         retval = (self.pid, 1)
2210
2211                 if retval == (0, 0):
2212                         return None
2213                 self._set_returncode(retval)
2214                 return self.returncode
2215
2216         def cancel(self):
2217                 if self.isAlive():
2218                         try:
2219                                 os.kill(self.pid, signal.SIGTERM)
2220                         except OSError, e:
2221                                 if e.errno != errno.ESRCH:
2222                                         raise
2223                                 del e
2224
2225                 self.cancelled = True
2226                 if self.pid is not None:
2227                         self.wait()
2228                 return self.returncode
2229
2230         def isAlive(self):
2231                 return self.pid is not None and \
2232                         self.returncode is None
2233
2234         def _wait(self):
2235
2236                 if self.returncode is not None:
2237                         return self.returncode
2238
2239                 if self._registered:
2240                         self.scheduler.schedule(self._reg_id)
2241                         self._unregister()
2242                         if self.returncode is not None:
2243                                 return self.returncode
2244
2245                 try:
2246                         wait_retval = os.waitpid(self.pid, 0)
2247                 except OSError, e:
2248                         if e.errno != errno.ECHILD:
2249                                 raise
2250                         del e
2251                         self._set_returncode((self.pid, 1))
2252                 else:
2253                         self._set_returncode(wait_retval)
2254
2255                 return self.returncode
2256
2257         def _unregister(self):
2258                 """
2259                 Unregister from the scheduler and close open files.
2260                 """
2261
2262                 self._registered = False
2263
2264                 if self._reg_id is not None:
2265                         self.scheduler.unregister(self._reg_id)
2266                         self._reg_id = None
2267
2268                 if self._files is not None:
2269                         for f in self._files.itervalues():
2270                                 f.close()
2271                         self._files = None
2272
2273         def _set_returncode(self, wait_retval):
2274
2275                 retval = wait_retval[1]
2276
2277                 if retval != os.EX_OK:
2278                         if retval & 0xff:
2279                                 retval = (retval & 0xff) << 8
2280                         else:
2281                                 retval = retval >> 8
2282
2283                 self.returncode = retval
2284
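# _set_returncode() folds the raw 16-bit waitpid() status into a single exit
# code: a normal exit yields the child's exit code (status >> 8), while death
# by a signal yields a distinct nonzero value built from the low status byte.
# A small illustrative sketch (not used by portage):
def _demo_exit_status_conversion():
        """Apply the same conversion as SubProcess._set_returncode() to a few
        sample waitpid() status words and return (status, returncode) pairs."""
        results = []
        for status in (0, 2 << 8, signal.SIGTERM):
                if status & 0xff:
                        rc = (status & 0xff) << 8  # child was killed by a signal
                else:
                        rc = status >> 8           # normal exit: the exit code
                results.append((status, rc))
        return results

# _demo_exit_status_conversion() -> [(0, 0), (512, 2), (15, 3840)]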
2285 class SpawnProcess(SubProcess):
2286
2287         """
2288         Constructor keyword args are passed into portage.process.spawn().
2289         The required "args" keyword argument will be passed as the first
2290         spawn() argument.
2291         """
2292
2293         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2294                 "uid", "gid", "groups", "umask", "logfile",
2295                 "path_lookup", "pre_exec")
2296
2297         __slots__ = ("args",) + \
2298                 _spawn_kwarg_names
2299
2300         _file_names = ("log", "process", "stdout")
2301         _files_dict = slot_dict_class(_file_names, prefix="")
2302
2303         def _start(self):
2304
2305                 if self.cancelled:
2306                         return
2307
2308                 if self.fd_pipes is None:
2309                         self.fd_pipes = {}
2310                 fd_pipes = self.fd_pipes
2311                 fd_pipes.setdefault(0, sys.stdin.fileno())
2312                 fd_pipes.setdefault(1, sys.stdout.fileno())
2313                 fd_pipes.setdefault(2, sys.stderr.fileno())
2314
2315                 # flush any pending output
2316                 for fd in fd_pipes.itervalues():
2317                         if fd == sys.stdout.fileno():
2318                                 sys.stdout.flush()
2319                         if fd == sys.stderr.fileno():
2320                                 sys.stderr.flush()
2321
2322                 logfile = self.logfile
2323                 self._files = self._files_dict()
2324                 files = self._files
2325
2326                 master_fd, slave_fd = self._pipe(fd_pipes)
2327                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2328                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2329
2330                 null_input = None
2331                 fd_pipes_orig = fd_pipes.copy()
2332                 if self.background:
2333                         # TODO: Use job control functions like tcsetpgrp() to control
2334                         # access to stdin. Until then, use /dev/null so that any
2335                         # attempts to read from stdin will immediately return EOF
2336                         # instead of blocking indefinitely.
2337                         null_input = open('/dev/null', 'rb')
2338                         fd_pipes[0] = null_input.fileno()
2339                 else:
2340                         fd_pipes[0] = fd_pipes_orig[0]
2341
2342                 files.process = os.fdopen(master_fd, 'rb')
2343                 if logfile is not None:
2344
2345                         fd_pipes[1] = slave_fd
2346                         fd_pipes[2] = slave_fd
2347
2348                         files.log = open(logfile, mode='ab')
2349                         portage.util.apply_secpass_permissions(logfile,
2350                                 uid=portage.portage_uid, gid=portage.portage_gid,
2351                                 mode=0660)
2352
2353                         if not self.background:
2354                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2355
2356                         output_handler = self._output_handler
2357
2358                 else:
2359
2360                         # Create a dummy pipe so the scheduler can monitor
2361                         # the process from inside a poll() loop.
2362                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2363                         if self.background:
2364                                 fd_pipes[1] = slave_fd
2365                                 fd_pipes[2] = slave_fd
2366                         output_handler = self._dummy_handler
2367
2368                 kwargs = {}
2369                 for k in self._spawn_kwarg_names:
2370                         v = getattr(self, k)
2371                         if v is not None:
2372                                 kwargs[k] = v
2373
2374                 kwargs["fd_pipes"] = fd_pipes
2375                 kwargs["returnpid"] = True
2376                 kwargs.pop("logfile", None)
2377
2378                 self._reg_id = self.scheduler.register(files.process.fileno(),
2379                         self._registered_events, output_handler)
2380                 self._registered = True
2381
2382                 retval = self._spawn(self.args, **kwargs)
2383
2384                 os.close(slave_fd)
2385                 if null_input is not None:
2386                         null_input.close()
2387
2388                 if isinstance(retval, int):
2389                         # spawn failed
2390                         self._unregister()
2391                         self.returncode = retval
2392                         self.wait()
2393                         return
2394
2395                 self.pid = retval[0]
2396                 portage.process.spawned_pids.remove(self.pid)
2397
2398         def _pipe(self, fd_pipes):
2399                 """
2400                 @type fd_pipes: dict
2401                 @param fd_pipes: pipes from which to copy terminal size if desired.
2402                 """
2403                 return os.pipe()
2404
2405         def _spawn(self, args, **kwargs):
2406                 return portage.process.spawn(args, **kwargs)
2407
2408         def _output_handler(self, fd, event):
2409
2410                 if event & PollConstants.POLLIN:
2411
2412                         files = self._files
2413                         buf = array.array('B')
2414                         try:
2415                                 buf.fromfile(files.process, self._bufsize)
2416                         except EOFError:
2417                                 pass
2418
2419                         if buf:
2420                                 if not self.background:
2421                                         buf.tofile(files.stdout)
2422                                         files.stdout.flush()
2423                                 buf.tofile(files.log)
2424                                 files.log.flush()
2425                         else:
2426                                 self._unregister()
2427                                 self.wait()
2428
2429                 self._unregister_if_appropriate(event)
2430                 return self._registered
2431
2432         def _dummy_handler(self, fd, event):
2433                 """
2434                 This method is mainly interested in detecting EOF, since
2435                 the only purpose of the pipe is to allow the scheduler to
2436                 monitor the process from inside a poll() loop.
2437                 """
2438
2439                 if event & PollConstants.POLLIN:
2440
2441                         buf = array.array('B')
2442                         try:
2443                                 buf.fromfile(self._files.process, self._bufsize)
2444                         except EOFError:
2445                                 pass
2446
2447                         if buf:
2448                                 pass
2449                         else:
2450                                 self._unregister()
2451                                 self.wait()
2452
2453                 self._unregister_if_appropriate(event)
2454                 return self._registered
2455
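# SpawnProcess wires the child's stdout/stderr through a pipe so the scheduler
# can both tee output to a log file and detect exit via EOF. A commented usage
# sketch (scheduler is assumed to be an existing poll scheduler; the log path
# is arbitrary):
#
#       proc = SpawnProcess(args=["/bin/true"], scheduler=scheduler,
#               logfile="/var/tmp/example.log")
#       proc.start()
#       proc.wait()     # drives scheduler.schedule() until the child exits
#       proc.returncode # -> os.EX_OK on success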
2456 class MiscFunctionsProcess(SpawnProcess):
2457         """
2458         Spawns misc-functions.sh with an existing ebuild environment.
2459         """
2460
2461         __slots__ = ("commands", "phase", "pkg", "settings")
2462
2463         def _start(self):
2464                 settings = self.settings
2465                 settings.pop("EBUILD_PHASE", None)
2466                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2467                 misc_sh_binary = os.path.join(portage_bin_path,
2468                         os.path.basename(portage.const.MISC_SH_BINARY))
2469
2470                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2471                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2472
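                     # Remove any stale exit status file so that _set_returncode() reads the
                     # status written by this invocation of misc-functions.sh.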
2473                 portage._doebuild_exit_status_unlink(
2474                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2475
2476                 SpawnProcess._start(self)
2477
2478         def _spawn(self, args, **kwargs):
2479                 settings = self.settings
2480                 debug = settings.get("PORTAGE_DEBUG") == "1"
2481                 return portage.spawn(" ".join(args), settings,
2482                         debug=debug, **kwargs)
2483
2484         def _set_returncode(self, wait_retval):
2485                 SpawnProcess._set_returncode(self, wait_retval)
2486                 self.returncode = portage._doebuild_exit_status_check_and_log(
2487                         self.settings, self.phase, self.returncode)
2488
2489 class EbuildFetcher(SpawnProcess):
2490
2491         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2492                 ("_build_dir",)
2493
2494         def _start(self):
2495
2496                 root_config = self.pkg.root_config
2497                 portdb = root_config.trees["porttree"].dbapi
2498                 ebuild_path = portdb.findname(self.pkg.cpv)
2499                 settings = self.config_pool.allocate()
2500                 settings.setcpv(self.pkg)
2501
2502                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2503                 # should not be touched since otherwise it could interfere with
2504                 # another instance of the same cpv concurrently being built for a
2505                 # different $ROOT (currently, builds only cooperate with prefetchers
2506                 # that are spawned for the same $ROOT).
2507                 if not self.prefetch:
2508                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2509                         self._build_dir.lock()
2510                         self._build_dir.clean_log()
2511                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2512                         if self.logfile is None:
2513                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2514
2515                 phase = "fetch"
2516                 if self.fetchall:
2517                         phase = "fetchall"
2518
2519                 # If any incremental variables have been overridden
2520                 # via the environment, those values need to be passed
2521                 # along here so that they are correctly considered by
2522                 # the config instance in the subprocess.
2523                 fetch_env = os.environ.copy()
2524
2525                 nocolor = settings.get("NOCOLOR")
2526                 if nocolor is not None:
2527                         fetch_env["NOCOLOR"] = nocolor
2528
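                     # The current process has already been reniced, so export
                     # PORTAGE_NICENESS=0 to keep the child from applying it a second time.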
2529                 fetch_env["PORTAGE_NICENESS"] = "0"
2530                 if self.prefetch:
2531                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2532
2533                 ebuild_binary = os.path.join(
2534                         settings["PORTAGE_BIN_PATH"], "ebuild")
2535
2536                 fetch_args = [ebuild_binary, ebuild_path, phase]
2537                 debug = settings.get("PORTAGE_DEBUG") == "1"
2538                 if debug:
2539                         fetch_args.append("--debug")
2540
2541                 self.args = fetch_args
2542                 self.env = fetch_env
2543                 SpawnProcess._start(self)
2544
2545         def _pipe(self, fd_pipes):
2546                 """When appropriate, use a pty so that fetcher progress bars,
2547                 such as the one wget displays, work properly."""
2548                 if self.background or not sys.stdout.isatty():
2549                         # When the output only goes to a log file,
2550                         # there's no point in creating a pty.
2551                         return os.pipe()
2552                 stdout_pipe = fd_pipes.get(1)
2553                 got_pty, master_fd, slave_fd = \
2554                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2555                 return (master_fd, slave_fd)
2556
2557         def _set_returncode(self, wait_retval):
2558                 SpawnProcess._set_returncode(self, wait_retval)
2559                 # Collect elog messages that might have been
2560                 # created by the pkg_nofetch phase.
2561                 if self._build_dir is not None:
2562                         # Skip elog messages for prefetch, in order to avoid duplicates.
2563                         if not self.prefetch and self.returncode != os.EX_OK:
2564                                 elog_out = None
2565                                 if self.logfile is not None:
2566                                         if self.background:
2567                                                 elog_out = open(self.logfile, 'a')
2568                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2569                                 if self.logfile is not None:
2570                                         msg += ", Log file:"
2571                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2572                                 if self.logfile is not None:
2573                                         eerror(" '%s'" % (self.logfile,),
2574                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2575                                 if elog_out is not None:
2576                                         elog_out.close()
2577                         if not self.prefetch:
2578                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2579                         features = self._build_dir.settings.features
2580                         if self.returncode == os.EX_OK:
2581                                 self._build_dir.clean_log()
2582                         self._build_dir.unlock()
2583                         self.config_pool.deallocate(self._build_dir.settings)
2584                         self._build_dir = None
2585
2586 class EbuildBuildDir(SlotObject):
2587
2588         __slots__ = ("dir_path", "pkg", "settings",
2589                 "locked", "_catdir", "_lock_obj")
2590
2591         def __init__(self, **kwargs):
2592                 SlotObject.__init__(self, **kwargs)
2593                 self.locked = False
2594
2595         def lock(self):
2596                 """
2597                 This raises an AlreadyLocked exception if lock() is called
2598                 while a lock is already held. In order to avoid this, call
2599                 unlock() or check whether the "locked" attribute is True
2600                 or False before calling lock().
2601                 """
2602                 if self._lock_obj is not None:
2603                         raise self.AlreadyLocked((self._lock_obj,))
2604
2605                 dir_path = self.dir_path
2606                 if dir_path is None:
2607                         root_config = self.pkg.root_config
2608                         portdb = root_config.trees["porttree"].dbapi
2609                         ebuild_path = portdb.findname(self.pkg.cpv)
2610                         settings = self.settings
2611                         settings.setcpv(self.pkg)
2612                         debug = settings.get("PORTAGE_DEBUG") == "1"
2613                         use_cache = 1 # always true
2614                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615                                 self.settings, debug, use_cache, portdb)
2616                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2617
2618                 catdir = os.path.dirname(dir_path)
2619                 self._catdir = catdir
2620
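                     # Hold a lock on the category directory while it is created and while the
                     # builddir lock is acquired, so that a concurrent unlock() cannot remove
                     # the category directory out from under us.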
2621                 portage.util.ensure_dirs(os.path.dirname(catdir),
2622                         gid=portage.portage_gid,
2623                         mode=070, mask=0)
2624                 catdir_lock = None
2625                 try:
2626                         catdir_lock = portage.locks.lockdir(catdir)
2627                         portage.util.ensure_dirs(catdir,
2628                                 gid=portage.portage_gid,
2629                                 mode=070, mask=0)
2630                         self._lock_obj = portage.locks.lockdir(dir_path)
2631                 finally:
2632                         self.locked = self._lock_obj is not None
2633                         if catdir_lock is not None:
2634                                 portage.locks.unlockdir(catdir_lock)
2635
2636         def clean_log(self):
2637                 """Discard the existing build log (and .logid marker) so that a new
2638                 log can be started without appending to the old one."""
2639                 settings = self.settings
2640
2641                 for x in ('.logid', 'temp/build.log'):
2642                         try:
2643                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2644                         except OSError:
2645                                 pass
2646
2647         def unlock(self):
2648                 if self._lock_obj is None:
2649                         return
2650
2651                 portage.locks.unlockdir(self._lock_obj)
2652                 self._lock_obj = None
2653                 self.locked = False
2654
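                     # Try to remove the category directory if it is now empty; ENOTEMPTY
                     # simply means another build dir still exists inside it.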
2655                 catdir = self._catdir
2656                 catdir_lock = None
2657                 try:
2658                         catdir_lock = portage.locks.lockdir(catdir)
2659                 finally:
2660                         if catdir_lock:
2661                                 try:
2662                                         os.rmdir(catdir)
2663                                 except OSError, e:
2664                                         if e.errno not in (errno.ENOENT,
2665                                                 errno.ENOTEMPTY, errno.EEXIST):
2666                                                 raise
2667                                         del e
2668                                 portage.locks.unlockdir(catdir_lock)
2669
2670         class AlreadyLocked(portage.exception.PortageException):
2671                 pass
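             # Typical usage (a sketch based on how EbuildFetcher and Binpkg use
             # this class):
             #
             #       build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
             #       build_dir.lock()
             #       try:
             #               build_dir.clean_log()
             #               portage.prepare_build_dirs(pkg.root, build_dir.settings, 0)
             #       finally:
             #               build_dir.unlock()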
2672
2673 class EbuildBuild(CompositeTask):
2674
2675         __slots__ = ("args_set", "config_pool", "find_blockers",
2676                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2677                 "prefetcher", "settings", "world_atom") + \
2678                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2679
2680         def _start(self):
2681
2682                 logger = self.logger
2683                 opts = self.opts
2684                 pkg = self.pkg
2685                 settings = self.settings
2686                 world_atom = self.world_atom
2687                 root_config = pkg.root_config
2688                 tree = "porttree"
2689                 self._tree = tree
2690                 portdb = root_config.trees[tree].dbapi
2691                 settings.setcpv(pkg)
2692                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2693                 ebuild_path = portdb.findname(self.pkg.cpv)
2694                 self._ebuild_path = ebuild_path
2695
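                     # If a prefetcher was spawned for this package, reap it if it has already
                     # exited, or wait for it to finish before fetching ourselves.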
2696                 prefetcher = self.prefetcher
2697                 if prefetcher is None:
2698                         pass
2699                 elif not prefetcher.isAlive():
2700                         prefetcher.cancel()
2701                 elif prefetcher.poll() is None:
2702
2703                         waiting_msg = "Fetching files " + \
2704                                 "in the background. " + \
2705                                 "To view fetch progress, run `tail -f " + \
2706                                 "/var/log/emerge-fetch.log` in another " + \
2707                                 "terminal."
2708                         msg_prefix = colorize("GOOD", " * ")
2709                         from textwrap import wrap
2710                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2711                                 for line in wrap(waiting_msg, 65))
2712                         if not self.background:
2713                                 writemsg(waiting_msg, noiselevel=-1)
2714
2715                         self._current_task = prefetcher
2716                         prefetcher.addExitListener(self._prefetch_exit)
2717                         return
2718
2719                 self._prefetch_exit(prefetcher)
2720
2721         def _prefetch_exit(self, prefetcher):
2722
2723                 opts = self.opts
2724                 pkg = self.pkg
2725                 settings = self.settings
2726
2727                 if opts.fetchonly:
2728                         fetcher = EbuildFetchonly(
2729                                 fetch_all=opts.fetch_all_uri,
2730                                 pkg=pkg, pretend=opts.pretend,
2731                                 settings=settings)
2732                         retval = fetcher.execute()
2733                         self.returncode = retval
2734                         self.wait()
2735                         return
2736
2737                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2738                         fetchall=opts.fetch_all_uri,
2739                         fetchonly=opts.fetchonly,
2740                         background=self.background,
2741                         pkg=pkg, scheduler=self.scheduler)
2742
2743                 self._start_task(fetcher, self._fetch_exit)
2744
2745         def _fetch_exit(self, fetcher):
2746                 opts = self.opts
2747                 pkg = self.pkg
2748
2749                 fetch_failed = False
2750                 if opts.fetchonly:
2751                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2752                 else:
2753                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2754
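                     # On failure, point PORTAGE_LOG_FILE at the fetch log so that subsequent
                     # error handling and elog output reference it.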
2755                 if fetch_failed and fetcher.logfile is not None and \
2756                         os.path.exists(fetcher.logfile):
2757                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2758
2759                 if not fetch_failed and fetcher.logfile is not None:
2760                         # Fetch was successful, so remove the fetch log.
2761                         try:
2762                                 os.unlink(fetcher.logfile)
2763                         except OSError:
2764                                 pass
2765
2766                 if fetch_failed or opts.fetchonly:
2767                         self.wait()
2768                         return
2769
2770                 logger = self.logger
2771                 opts = self.opts
2772                 pkg_count = self.pkg_count
2773                 scheduler = self.scheduler
2774                 settings = self.settings
2775                 features = settings.features
2776                 ebuild_path = self._ebuild_path
2777                 system_set = pkg.root_config.sets["system"]
2778
2779                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2780                 self._build_dir.lock()
2781
2782                 # Cleaning is triggered before the setup
2783                 # phase, in portage.doebuild().
2784                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2785                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2786                 short_msg = "emerge: (%s of %s) %s Clean" % \
2787                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2788                 logger.log(msg, short_msg=short_msg)
2789
2790                 #buildsyspkg: Check if we need to _force_ binary package creation
2791                 self._issyspkg = "buildsyspkg" in features and \
2792                                 system_set.findAtomForPackage(pkg) and \
2793                                 not opts.buildpkg
2794
2795                 if opts.buildpkg or self._issyspkg:
2796
2797                         self._buildpkg = True
2798
2799                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2800                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2801                         short_msg = "emerge: (%s of %s) %s Compile" % \
2802                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2803                         logger.log(msg, short_msg=short_msg)
2804
2805                 else:
2806                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2807                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2808                         short_msg = "emerge: (%s of %s) %s Compile" % \
2809                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2810                         logger.log(msg, short_msg=short_msg)
2811
2812                 build = EbuildExecuter(background=self.background, pkg=pkg,
2813                         scheduler=scheduler, settings=settings)
2814                 self._start_task(build, self._build_exit)
2815
2816         def _unlock_builddir(self):
2817                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2818                 self._build_dir.unlock()
2819
2820         def _build_exit(self, build):
2821                 if self._default_exit(build) != os.EX_OK:
2822                         self._unlock_builddir()
2823                         self.wait()
2824                         return
2825
2826                 opts = self.opts
2827                 buildpkg = self._buildpkg
2828
2829                 if not buildpkg:
2830                         self._final_exit(build)
2831                         self.wait()
2832                         return
2833
2834                 if self._issyspkg:
2835                         msg = ">>> This is a system package, " + \
2836                                 "let's pack a rescue tarball.\n"
2837
2838                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2839                         if log_path is not None:
2840                                 log_file = open(log_path, 'a')
2841                                 try:
2842                                         log_file.write(msg)
2843                                 finally:
2844                                         log_file.close()
2845
2846                         if not self.background:
2847                                 portage.writemsg_stdout(msg, noiselevel=-1)
2848
2849                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2850                         scheduler=self.scheduler, settings=self.settings)
2851
2852                 self._start_task(packager, self._buildpkg_exit)
2853
2854         def _buildpkg_exit(self, packager):
2855                 """
2856                 Release the build dir lock when there is a failure or
2857                 when in buildpkgonly mode. Otherwise, the lock is held
2858                 until the package has been merged via install().
2859                 """
2860
2861                 if self._default_exit(packager) != os.EX_OK:
2862                         self._unlock_builddir()
2863                         self.wait()
2864                         return
2865
2866                 if self.opts.buildpkgonly:
2867                         # Need to call "clean" phase for buildpkgonly mode
2868                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2869                         phase = "clean"
2870                         clean_phase = EbuildPhase(background=self.background,
2871                                 pkg=self.pkg, phase=phase,
2872                                 scheduler=self.scheduler, settings=self.settings,
2873                                 tree=self._tree)
2874                         self._start_task(clean_phase, self._clean_exit)
2875                         return
2876
2877                 # Continue holding the builddir lock until
2878                 # after the package has been installed.
2879                 self._current_task = None
2880                 self.returncode = packager.returncode
2881                 self.wait()
2882
2883         def _clean_exit(self, clean_phase):
2884                 if self._final_exit(clean_phase) != os.EX_OK or \
2885                         self.opts.buildpkgonly:
2886                         self._unlock_builddir()
2887                 self.wait()
2888
2889         def install(self):
2890                 """
2891                 Install the package and then clean up and release locks.
2892                 Only call this after the build has completed successfully
2893                 and neither fetchonly nor buildpkgonly mode are enabled.
2894                 """
2895
2896                 find_blockers = self.find_blockers
2897                 ldpath_mtimes = self.ldpath_mtimes
2898                 logger = self.logger
2899                 pkg = self.pkg
2900                 pkg_count = self.pkg_count
2901                 settings = self.settings
2902                 world_atom = self.world_atom
2903                 ebuild_path = self._ebuild_path
2904                 tree = self._tree
2905
2906                 merge = EbuildMerge(find_blockers=self.find_blockers,
2907                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2908                         pkg_count=pkg_count, pkg_path=ebuild_path,
2909                         scheduler=self.scheduler,
2910                         settings=settings, tree=tree, world_atom=world_atom)
2911
2912                 msg = " === (%s of %s) Merging (%s::%s)" % \
2913                         (pkg_count.curval, pkg_count.maxval,
2914                         pkg.cpv, ebuild_path)
2915                 short_msg = "emerge: (%s of %s) %s Merge" % \
2916                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2917                 logger.log(msg, short_msg=short_msg)
2918
2919                 try:
2920                         rval = merge.execute()
2921                 finally:
2922                         self._unlock_builddir()
2923
2924                 return rval
2925
2926 class EbuildExecuter(CompositeTask):
2927
2928         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2929
2930         _phases = ("prepare", "configure", "compile", "test", "install")
2931
2932         _live_eclasses = frozenset([
2933                 "bzr",
2934                 "cvs",
2935                 "darcs",
2936                 "git",
2937                 "mercurial",
2938                 "subversion"
2939         ])
2940
2941         def _start(self):
2942                 self._tree = "porttree"
2943                 pkg = self.pkg
2944                 phase = "clean"
2945                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2946                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2947                 self._start_task(clean_phase, self._clean_phase_exit)
2948
2949         def _clean_phase_exit(self, clean_phase):
2950
2951                 if self._default_exit(clean_phase) != os.EX_OK:
2952                         self.wait()
2953                         return
2954
2955                 pkg = self.pkg
2956                 scheduler = self.scheduler
2957                 settings = self.settings
2958                 cleanup = 1
2959
2960                 # This initializes PORTAGE_LOG_FILE.
2961                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2962
2963                 setup_phase = EbuildPhase(background=self.background,
2964                         pkg=pkg, phase="setup", scheduler=scheduler,
2965                         settings=settings, tree=self._tree)
2966
2967                 setup_phase.addExitListener(self._setup_exit)
2968                 self._current_task = setup_phase
2969                 self.scheduler.scheduleSetup(setup_phase)
2970
2971         def _setup_exit(self, setup_phase):
2972
2973                 if self._default_exit(setup_phase) != os.EX_OK:
2974                         self.wait()
2975                         return
2976
2977                 unpack_phase = EbuildPhase(background=self.background,
2978                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2979                         settings=self.settings, tree=self._tree)
2980
2981                 if self._live_eclasses.intersection(self.pkg.inherited):
2982                         # Serialize $DISTDIR access for live ebuilds since
2983                         # otherwise they can interfere with each other.
2984
2985                         unpack_phase.addExitListener(self._unpack_exit)
2986                         self._current_task = unpack_phase
2987                         self.scheduler.scheduleUnpack(unpack_phase)
2988
2989                 else:
2990                         self._start_task(unpack_phase, self._unpack_exit)
2991
2992         def _unpack_exit(self, unpack_phase):
2993
2994                 if self._default_exit(unpack_phase) != os.EX_OK:
2995                         self.wait()
2996                         return
2997
2998                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2999
3000                 pkg = self.pkg
3001                 phases = self._phases
3002                 eapi = pkg.metadata["EAPI"]
3003                 if eapi in ("0", "1"):
3004                         # skip src_prepare and src_configure
3005                         phases = phases[2:]
3006
3007                 for phase in phases:
3008                         ebuild_phases.add(EbuildPhase(background=self.background,
3009                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3010                                 settings=self.settings, tree=self._tree))
3011
3012                 self._start_task(ebuild_phases, self._default_final_exit)
3013
3014 class EbuildMetadataPhase(SubProcess):
3015
3016         """
3017         Asynchronous interface for the ebuild "depend" phase which is
3018         used to extract metadata from the ebuild.
3019         """
3020
3021         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3022                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3023                 ("_raw_metadata",)
3024
3025         _file_names = ("ebuild",)
3026         _files_dict = slot_dict_class(_file_names, prefix="")
3027         _metadata_fd = 9
3028
3029         def _start(self):
3030                 settings = self.settings
3031                 settings.reset()
3032                 ebuild_path = self.ebuild_path
3033                 debug = settings.get("PORTAGE_DEBUG") == "1"
3034                 master_fd = None
3035                 slave_fd = None
3036                 fd_pipes = None
3037                 if self.fd_pipes is not None:
3038                         fd_pipes = self.fd_pipes.copy()
3039                 else:
3040                         fd_pipes = {}
3041
3042                 fd_pipes.setdefault(0, sys.stdin.fileno())
3043                 fd_pipes.setdefault(1, sys.stdout.fileno())
3044                 fd_pipes.setdefault(2, sys.stderr.fileno())
3045
3046                 # flush any pending output
3047                 for fd in fd_pipes.itervalues():
3048                         if fd == sys.stdout.fileno():
3049                                 sys.stdout.flush()
3050                         if fd == sys.stderr.fileno():
3051                                 sys.stderr.flush()
3052
3053                 fd_pipes_orig = fd_pipes.copy()
3054                 self._files = self._files_dict()
3055                 files = self._files
3056
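                     # The ebuild environment writes its metadata to fd 9 (_metadata_fd);
                     # make our read end non-blocking so that _output_handler() never stalls
                     # the scheduler's poll loop.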
3057                 master_fd, slave_fd = os.pipe()
3058                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3059                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3060
3061                 fd_pipes[self._metadata_fd] = slave_fd
3062
3063                 self._raw_metadata = []
3064                 files.ebuild = os.fdopen(master_fd, 'r')
3065                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3066                         self._registered_events, self._output_handler)
3067                 self._registered = True
3068
3069                 retval = portage.doebuild(ebuild_path, "depend",
3070                         settings["ROOT"], settings, debug,
3071                         mydbapi=self.portdb, tree="porttree",
3072                         fd_pipes=fd_pipes, returnpid=True)
3073
3074                 os.close(slave_fd)
3075
3076                 if isinstance(retval, int):
3077                         # doebuild failed before spawning
3078                         self._unregister()
3079                         self.returncode = retval
3080                         self.wait()
3081                         return
3082
3083                 self.pid = retval[0]
3084                 portage.process.spawned_pids.remove(self.pid)
3085
3086         def _output_handler(self, fd, event):
3087
3088                 if event & PollConstants.POLLIN:
3089                         self._raw_metadata.append(self._files.ebuild.read())
3090                         if not self._raw_metadata[-1]:
3091                                 self._unregister()
3092                                 self.wait()
3093
3094                 self._unregister_if_appropriate(event)
3095                 return self._registered
3096
3097         def _set_returncode(self, wait_retval):
3098                 SubProcess._set_returncode(self, wait_retval)
3099                 if self.returncode == os.EX_OK:
3100                         metadata_lines = "".join(self._raw_metadata).splitlines()
3101                         if len(portage.auxdbkeys) != len(metadata_lines):
3102                                 # Don't trust bash's returncode if the
3103                                 # number of lines is incorrect.
3104                                 self.returncode = 1
3105                         else:
3106                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3107                                 self.metadata_callback(self.cpv, self.ebuild_path,
3108                                         self.repo_path, metadata, self.ebuild_mtime)
3109
3110 class EbuildProcess(SpawnProcess):
3111
3112         __slots__ = ("phase", "pkg", "settings", "tree")
3113
3114         def _start(self):
3115                 # Don't open the log file during the clean phase since the
3116                 # open file can result in an nfs lock on $T/build.log which
3117                 # prevents the clean phase from removing $T.
3118                 if self.phase not in ("clean", "cleanrm"):
3119                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3120                 SpawnProcess._start(self)
3121
3122         def _pipe(self, fd_pipes):
3123                 stdout_pipe = fd_pipes.get(1)
3124                 got_pty, master_fd, slave_fd = \
3125                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3126                 return (master_fd, slave_fd)
3127
3128         def _spawn(self, args, **kwargs):
3129
3130                 root_config = self.pkg.root_config
3131                 tree = self.tree
3132                 mydbapi = root_config.trees[tree].dbapi
3133                 settings = self.settings
3134                 ebuild_path = settings["EBUILD"]
3135                 debug = settings.get("PORTAGE_DEBUG") == "1"
3136
3137                 rval = portage.doebuild(ebuild_path, self.phase,
3138                         root_config.root, settings, debug,
3139                         mydbapi=mydbapi, tree=tree, **kwargs)
3140
3141                 return rval
3142
3143         def _set_returncode(self, wait_retval):
3144                 SpawnProcess._set_returncode(self, wait_retval)
3145
3146                 if self.phase not in ("clean", "cleanrm"):
3147                         self.returncode = portage._doebuild_exit_status_check_and_log(
3148                                 self.settings, self.phase, self.returncode)
3149
3150                 if self.phase == "test" and self.returncode != os.EX_OK and \
3151                         "test-fail-continue" in self.settings.features:
3152                         self.returncode = os.EX_OK
3153
3154                 portage._post_phase_userpriv_perms(self.settings)
3155
3156 class EbuildPhase(CompositeTask):
3157
3158         __slots__ = ("background", "pkg", "phase",
3159                 "scheduler", "settings", "tree")
3160
3161         _post_phase_cmds = portage._post_phase_cmds
3162
3163         def _start(self):
3164
3165                 ebuild_process = EbuildProcess(background=self.background,
3166                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3167                         settings=self.settings, tree=self.tree)
3168
3169                 self._start_task(ebuild_process, self._ebuild_exit)
3170
3171         def _ebuild_exit(self, ebuild_process):
3172
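                     # For the install phase, scan the build log for common problems; when
                     # running in the background the report is appended to the log itself.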
3173                 if self.phase == "install":
3174                         out = None
3175                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3176                         log_file = None
3177                         if self.background and log_path is not None:
3178                                 log_file = open(log_path, 'a')
3179                                 out = log_file
3180                         try:
3181                                 portage._check_build_log(self.settings, out=out)
3182                         finally:
3183                                 if log_file is not None:
3184                                         log_file.close()
3185
3186                 if self._default_exit(ebuild_process) != os.EX_OK:
3187                         self.wait()
3188                         return
3189
3190                 settings = self.settings
3191
3192                 if self.phase == "install":
3193                         portage._post_src_install_uid_fix(settings)
3194
3195                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3196                 if post_phase_cmds is not None:
3197                         post_phase = MiscFunctionsProcess(background=self.background,
3198                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3199                                 scheduler=self.scheduler, settings=settings)
3200                         self._start_task(post_phase, self._post_phase_exit)
3201                         return
3202
3203                 self.returncode = ebuild_process.returncode
3204                 self._current_task = None
3205                 self.wait()
3206
3207         def _post_phase_exit(self, post_phase):
3208                 if self._final_exit(post_phase) != os.EX_OK:
3209                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3210                                 noiselevel=-1)
3211                 self._current_task = None
3212                 self.wait()
3213                 return
3214
3215 class EbuildBinpkg(EbuildProcess):
3216         """
3217         This assumes that src_install() has successfully completed.
3218         """
3219         __slots__ = ("_binpkg_tmpfile",)
3220
3221         def _start(self):
3222                 self.phase = "package"
3223                 self.tree = "porttree"
3224                 pkg = self.pkg
3225                 root_config = pkg.root_config
3226                 portdb = root_config.trees["porttree"].dbapi
3227                 bintree = root_config.trees["bintree"]
3228                 ebuild_path = portdb.findname(self.pkg.cpv)
3229                 settings = self.settings
3230                 debug = settings.get("PORTAGE_DEBUG") == "1"
3231
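                     # Build the binary package into a temporary file; _set_returncode()
                     # injects it into the binary tree only if packaging succeeds.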
3232                 bintree.prevent_collision(pkg.cpv)
3233                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3234                         pkg.cpv + ".tbz2." + str(os.getpid()))
3235                 self._binpkg_tmpfile = binpkg_tmpfile
3236                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3237                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3238
3239                 try:
3240                         EbuildProcess._start(self)
3241                 finally:
3242                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3243
3244         def _set_returncode(self, wait_retval):
3245                 EbuildProcess._set_returncode(self, wait_retval)
3246
3247                 pkg = self.pkg
3248                 bintree = pkg.root_config.trees["bintree"]
3249                 binpkg_tmpfile = self._binpkg_tmpfile
3250                 if self.returncode == os.EX_OK:
3251                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3252
3253 class EbuildMerge(SlotObject):
3254
3255         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3256                 "pkg", "pkg_count", "pkg_path", "pretend",
3257                 "scheduler", "settings", "tree", "world_atom")
3258
3259         def execute(self):
3260                 root_config = self.pkg.root_config
3261                 settings = self.settings
3262                 retval = portage.merge(settings["CATEGORY"],
3263                         settings["PF"], settings["D"],
3264                         os.path.join(settings["PORTAGE_BUILDDIR"],
3265                         "build-info"), root_config.root, settings,
3266                         myebuild=settings["EBUILD"],
3267                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3268                         vartree=root_config.trees["vartree"],
3269                         prev_mtimes=self.ldpath_mtimes,
3270                         scheduler=self.scheduler,
3271                         blockers=self.find_blockers)
3272
3273                 if retval == os.EX_OK:
3274                         self.world_atom(self.pkg)
3275                         self._log_success()
3276
3277                 return retval
3278
3279         def _log_success(self):
3280                 pkg = self.pkg
3281                 pkg_count = self.pkg_count
3282                 pkg_path = self.pkg_path
3283                 logger = self.logger
3284                 if "noclean" not in self.settings.features:
3285                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3286                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3287                         logger.log((" === (%s of %s) " + \
3288                                 "Post-Build Cleaning (%s::%s)") % \
3289                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3290                                 short_msg=short_msg)
3291                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3292                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3293
3294 class PackageUninstall(AsynchronousTask):
3295
3296         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3297
3298         def _start(self):
3299                 try:
3300                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3301                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3302                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3303                                 writemsg_level=self._writemsg_level)
3304                 except UninstallFailure, e:
3305                         self.returncode = e.status
3306                 else:
3307                         self.returncode = os.EX_OK
3308                 self.wait()
3309
3310         def _writemsg_level(self, msg, level=0, noiselevel=0):
3311
3312                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3313                 background = self.background
3314
3315                 if log_path is None:
3316                         if not (background and level < logging.WARNING):
3317                                 portage.util.writemsg_level(msg,
3318                                         level=level, noiselevel=noiselevel)
3319                 else:
3320                         if not background:
3321                                 portage.util.writemsg_level(msg,
3322                                         level=level, noiselevel=noiselevel)
3323
3324                         f = open(log_path, 'a')
3325                         try:
3326                                 f.write(msg)
3327                         finally:
3328                                 f.close()
3329
3330 class Binpkg(CompositeTask):
3331
3332         __slots__ = ("find_blockers",
3333                 "ldpath_mtimes", "logger", "opts",
3334                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3335                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3336                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3337
3338         def _writemsg_level(self, msg, level=0, noiselevel=0):
3339
3340                 if not self.background:
3341                         portage.util.writemsg_level(msg,
3342                                 level=level, noiselevel=noiselevel)
3343
3344                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3345                 if log_path is not None:
3346                         f = open(log_path, 'a')
3347                         try:
3348                                 f.write(msg)
3349                         finally:
3350                                 f.close()
3351
3352         def _start(self):
3353
3354                 pkg = self.pkg
3355                 settings = self.settings
3356                 settings.setcpv(pkg)
3357                 self._tree = "bintree"
3358                 self._bintree = self.pkg.root_config.trees[self._tree]
3359                 self._verify = not self.opts.pretend
3360
3361                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3362                         "portage", pkg.category, pkg.pf)
3363                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3364                         pkg=pkg, settings=settings)
3365                 self._image_dir = os.path.join(dir_path, "image")
3366                 self._infloc = os.path.join(dir_path, "build-info")
3367                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3368                 settings["EBUILD"] = self._ebuild_path
3369                 debug = settings.get("PORTAGE_DEBUG") == "1"
3370                 portage.doebuild_environment(self._ebuild_path, "setup",
3371                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3372                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3373
3374                 # The prefetcher has already completed or it
3375                 # could be running now. If it's running now,
3376                 # wait for it to complete since it holds
3377                 # a lock on the file being fetched. The
3378                 # portage.locks functions are only designed
3379                 # to work between separate processes. Since
3380                 # the lock is held by the current process,
3381                 # use the scheduler and fetcher methods to
3382                 # synchronize with the fetcher.
3383                 prefetcher = self.prefetcher
3384                 if prefetcher is None:
3385                         pass
3386                 elif not prefetcher.isAlive():
3387                         prefetcher.cancel()
3388                 elif prefetcher.poll() is None:
3389
3390                         waiting_msg = ("Fetching '%s' " + \
3391                                 "in the background. " + \
3392                                 "To view fetch progress, run `tail -f " + \
3393                                 "/var/log/emerge-fetch.log` in another " + \
3394                                 "terminal.") % prefetcher.pkg_path
3395                         msg_prefix = colorize("GOOD", " * ")
3396                         from textwrap import wrap
3397                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3398                                 for line in wrap(waiting_msg, 65))
3399                         if not self.background:
3400                                 writemsg(waiting_msg, noiselevel=-1)
3401
3402                         self._current_task = prefetcher
3403                         prefetcher.addExitListener(self._prefetch_exit)
3404                         return
3405
3406                 self._prefetch_exit(prefetcher)
3407
3408         def _prefetch_exit(self, prefetcher):
3409
3410                 pkg = self.pkg
3411                 pkg_count = self.pkg_count
3412                 if not (self.opts.pretend or self.opts.fetchonly):
3413                         self._build_dir.lock()
3414                         # If necessary, discard old log so that we don't
3415                         # append to it.
3416                         self._build_dir.clean_log()
3417                         # Initialize PORTAGE_LOG_FILE.
3418                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3419                 fetcher = BinpkgFetcher(background=self.background,
3420                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3421                         pretend=self.opts.pretend, scheduler=self.scheduler)
3422                 pkg_path = fetcher.pkg_path
3423                 self._pkg_path = pkg_path
3424
3425                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3426
3427                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3428                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3429                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3430                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3431                         self.logger.log(msg, short_msg=short_msg)
3432                         self._start_task(fetcher, self._fetcher_exit)
3433                         return
3434
3435                 self._fetcher_exit(fetcher)
3436
3437         def _fetcher_exit(self, fetcher):
3438
3439                 # The fetcher only has a returncode when
3440                 # --getbinpkg is enabled.
3441                 if fetcher.returncode is not None:
3442                         self._fetched_pkg = True
3443                         if self._default_exit(fetcher) != os.EX_OK:
3444                                 self._unlock_builddir()
3445                                 self.wait()
3446                                 return
3447
3448                 if self.opts.pretend:
3449                         self._current_task = None
3450                         self.returncode = os.EX_OK
3451                         self.wait()
3452                         return
3453
3454                 verifier = None
3455                 if self._verify:
3456                         logfile = None
3457                         if self.background:
3458                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3459                         verifier = BinpkgVerifier(background=self.background,
3460                                 logfile=logfile, pkg=self.pkg)
3461                         self._start_task(verifier, self._verifier_exit)
3462                         return
3463
3464                 self._verifier_exit(verifier)
3465
3466         def _verifier_exit(self, verifier):
3467                 if verifier is not None and \
3468                         self._default_exit(verifier) != os.EX_OK:
3469                         self._unlock_builddir()
3470                         self.wait()
3471                         return
3472
3473                 logger = self.logger
3474                 pkg = self.pkg
3475                 pkg_count = self.pkg_count
3476                 pkg_path = self._pkg_path
3477
3478                 if self._fetched_pkg:
3479                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3480
3481                 if self.opts.fetchonly:
3482                         self._current_task = None
3483                         self.returncode = os.EX_OK
3484                         self.wait()
3485                         return
3486
3487                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3488                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3489                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3490                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3491                 logger.log(msg, short_msg=short_msg)
3492
3493                 phase = "clean"
3494                 settings = self.settings
3495                 ebuild_phase = EbuildPhase(background=self.background,
3496                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3497                         settings=settings, tree=self._tree)
3498
3499                 self._start_task(ebuild_phase, self._clean_exit)
3500
3501         def _clean_exit(self, clean_phase):
3502                 if self._default_exit(clean_phase) != os.EX_OK:
3503                         self._unlock_builddir()
3504                         self.wait()
3505                         return
3506
3507                 dir_path = self._build_dir.dir_path
3508
3509                 infloc = self._infloc
3510                 pkg = self.pkg
3511                 pkg_path = self._pkg_path
3512
3513                 dir_mode = 0755
3514                 for mydir in (dir_path, self._image_dir, infloc):
3515                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3516                                 gid=portage.data.portage_gid, mode=dir_mode)
3517
3518                 # This initializes PORTAGE_LOG_FILE.
3519                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3520                 self._writemsg_level(">>> Extracting info\n")
3521
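                     # Unpack the xpak metadata into build-info, filling in CATEGORY and PF
                     # from the package instance if the binary package lacks them.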
3522                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3523                 check_missing_metadata = ("CATEGORY", "PF")
3524                 missing_metadata = set()
3525                 for k in check_missing_metadata:
3526                         v = pkg_xpak.getfile(k)
3527                         if not v:
3528                                 missing_metadata.add(k)
3529
3530                 pkg_xpak.unpackinfo(infloc)
3531                 for k in missing_metadata:
3532                         if k == "CATEGORY":
3533                                 v = pkg.category
3534                         elif k == "PF":
3535                                 v = pkg.pf
3536                         else:
3537                                 continue
3538
3539                         f = open(os.path.join(infloc, k), 'wb')
3540                         try:
3541                                 f.write(v + "\n")
3542                         finally:
3543                                 f.close()
3544
3545                 # Store the md5sum in the vdb.
3546                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3547                 try:
3548                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3549                 finally:
3550                         f.close()
3551
3552                 # This gives bashrc users an opportunity to do various things
3553                 # such as remove binary packages after they're installed.
3554                 settings = self.settings
3555                 settings.setcpv(self.pkg)
3556                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3557                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3558
3559                 phase = "setup"
3560                 setup_phase = EbuildPhase(background=self.background,
3561                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3562                         settings=settings, tree=self._tree)
3563
3564                 setup_phase.addExitListener(self._setup_exit)
3565                 self._current_task = setup_phase
3566                 self.scheduler.scheduleSetup(setup_phase)
3567
3568         def _setup_exit(self, setup_phase):
3569                 if self._default_exit(setup_phase) != os.EX_OK:
3570                         self._unlock_builddir()
3571                         self.wait()
3572                         return
3573
3574                 extractor = BinpkgExtractorAsync(background=self.background,
3575                         image_dir=self._image_dir,
3576                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3577                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3578                 self._start_task(extractor, self._extractor_exit)
3579
3580         def _extractor_exit(self, extractor):
3581                 if self._final_exit(extractor) != os.EX_OK:
3582                         self._unlock_builddir()
3583                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3584                                 noiselevel=-1)
3585                 self.wait()
3586
3587         def _unlock_builddir(self):
3588                 if self.opts.pretend or self.opts.fetchonly:
3589                         return
3590                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3591                 self._build_dir.unlock()
3592
3593         def install(self):
3594
3595                 # This gives bashrc users an opportunity to do various things
3596                 # such as remove binary packages after they're installed.
3597                 settings = self.settings
3598                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3599                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3600
3601                 merge = EbuildMerge(find_blockers=self.find_blockers,
3602                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3603                         pkg=self.pkg, pkg_count=self.pkg_count,
3604                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3605                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3606
3607                 try:
3608                         retval = merge.execute()
3609                 finally:
3610                         settings.pop("PORTAGE_BINPKG_FILE", None)
3611                         self._unlock_builddir()
3612                 return retval
3613
3614 class BinpkgFetcher(SpawnProcess):
3615
3616         __slots__ = ("pkg", "pretend",
3617                 "locked", "pkg_path", "_lock_obj")
3618
3619         def __init__(self, **kwargs):
3620                 SpawnProcess.__init__(self, **kwargs)
3621                 pkg = self.pkg
3622                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3623
3624         def _start(self):
3625
3626                 if self.cancelled:
3627                         return
3628
3629                 pkg = self.pkg
3630                 pretend = self.pretend
3631                 bintree = pkg.root_config.trees["bintree"]
3632                 settings = bintree.settings
3633                 use_locks = "distlocks" in settings.features
3634                 pkg_path = self.pkg_path
3635
3636                 if not pretend:
3637                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3638                         if use_locks:
3639                                 self.lock()
3640                 exists = os.path.exists(pkg_path)
3641                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3642                 if not (pretend or resume):
3643                         # Remove existing file or broken symlink.
3644                         try:
3645                                 os.unlink(pkg_path)
3646                         except OSError:
3647                                 pass
3648
3649                 # urljoin doesn't work correctly with
3650                 # unrecognized protocols like sftp, so build the URI manually.
3651                 if bintree._remote_has_index:
3652                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3653                         if not rel_uri:
3654                                 rel_uri = pkg.cpv + ".tbz2"
3655                         uri = bintree._remote_base_uri.rstrip("/") + \
3656                                 "/" + rel_uri.lstrip("/")
3657                 else:
3658                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3659                                 "/" + pkg.pf + ".tbz2"
3660
3661                 if pretend:
3662                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3663                         self.returncode = os.EX_OK
3664                         self.wait()
3665                         return
3666
3667                 protocol = urlparse.urlparse(uri)[0]
3668                 fcmd_prefix = "FETCHCOMMAND"
3669                 if resume:
3670                         fcmd_prefix = "RESUMECOMMAND"
3671                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3672                 if not fcmd:
3673                         fcmd = settings.get(fcmd_prefix)
3674
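                     # Expand the ${DISTDIR}, ${URI} and ${FILE} placeholders in the selected
                     # FETCHCOMMAND/RESUMECOMMAND template. An illustrative (not authoritative)
                     # template looks something like:
                     #     FETCHCOMMAND='wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"'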
3675                 fcmd_vars = {
3676                         "DISTDIR" : os.path.dirname(pkg_path),
3677                         "URI"     : uri,
3678                         "FILE"    : os.path.basename(pkg_path)
3679                 }
3680
3681                 fetch_env = dict(settings.iteritems())
3682                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3683                         for x in shlex.split(fcmd)]
3684
3685                 if self.fd_pipes is None:
3686                         self.fd_pipes = {}
3687                 fd_pipes = self.fd_pipes
3688
3689                 # Redirect all output to stdout since some fetchers like
3690                 # wget pollute stderr (if portage detects a problem then it
3691                 # can send its own message to stderr).
3692                 fd_pipes.setdefault(0, sys.stdin.fileno())
3693                 fd_pipes.setdefault(1, sys.stdout.fileno())
3694                 fd_pipes.setdefault(2, sys.stdout.fileno())
3695
3696                 self.args = fetch_args
3697                 self.env = fetch_env
3698                 SpawnProcess._start(self)
3699
3700         def _set_returncode(self, wait_retval):
3701                 SpawnProcess._set_returncode(self, wait_retval)
3702                 if self.returncode == os.EX_OK:
3703                         # If possible, update the mtime to match the remote package if
3704                         # the fetcher didn't already do it automatically.
3705                         bintree = self.pkg.root_config.trees["bintree"]
3706                         if bintree._remote_has_index:
3707                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3708                                 if remote_mtime is not None:
3709                                         try:
3710                                                 remote_mtime = long(remote_mtime)
3711                                         except ValueError:
3712                                                 pass
3713                                         else:
3714                                                 try:
3715                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3716                                                 except OSError:
3717                                                         pass
3718                                                 else:
3719                                                         if remote_mtime != local_mtime:
3720                                                                 try:
3721                                                                         os.utime(self.pkg_path,
3722                                                                                 (remote_mtime, remote_mtime))
3723                                                                 except OSError:
3724                                                                         pass
3725
3726                 if self.locked:
3727                         self.unlock()
3728
3729         def lock(self):
3730                 """
3731                 This raises an AlreadyLocked exception if lock() is called
3732                 while a lock is already held. In order to avoid this, call
3733                 unlock() or check whether the "locked" attribute is True
3734                 or False before calling lock().
3735                 """
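                     # Hedged caller-side sketch ("fetcher" is a hypothetical BinpkgFetcher
                     # instance):
                     #     if not fetcher.locked:
                     #             fetcher.lock()
                     #     try:
                     #             pass  # perform the fetch
                     #     finally:
                     #             fetcher.unlock()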
3736                 if self._lock_obj is not None:
3737                         raise self.AlreadyLocked((self._lock_obj,))
3738
3739                 self._lock_obj = portage.locks.lockfile(
3740                         self.pkg_path, wantnewlockfile=1)
3741                 self.locked = True
3742
3743         class AlreadyLocked(portage.exception.PortageException):
3744                 pass
3745
3746         def unlock(self):
3747                 if self._lock_obj is None:
3748                         return
3749                 portage.locks.unlockfile(self._lock_obj)
3750                 self._lock_obj = None
3751                 self.locked = False
3752
3753 class BinpkgVerifier(AsynchronousTask):
3754         __slots__ = ("logfile", "pkg",)
3755
3756         def _start(self):
3757                 """
3758                 Note: Unlike a normal AsynchronousTask.start() method,
3759                 this one does all of its work synchronously. The returncode
3760                 attribute will be set before it returns.
3761                 """
3762
3763                 pkg = self.pkg
3764                 root_config = pkg.root_config
3765                 bintree = root_config.trees["bintree"]
3766                 rval = os.EX_OK
3767                 stdout_orig = sys.stdout
3768                 stderr_orig = sys.stderr
3769                 log_file = None
3770                 if self.background and self.logfile is not None:
3771                         log_file = open(self.logfile, 'a')
3772                 try:
3773                         if log_file is not None:
3774                                 sys.stdout = log_file
3775                                 sys.stderr = log_file
3776                         try:
3777                                 bintree.digestCheck(pkg)
3778                         except portage.exception.FileNotFound:
3779                                 writemsg("!!! Fetching Binary failed " + \
3780                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3781                                 rval = 1
3782                         except portage.exception.DigestException, e:
3783                                 writemsg("\n!!! Digest verification failed:\n",
3784                                         noiselevel=-1)
3785                                 writemsg("!!! %s\n" % e.value[0],
3786                                         noiselevel=-1)
3787                                 writemsg("!!! Reason: %s\n" % e.value[1],
3788                                         noiselevel=-1)
3789                                 writemsg("!!! Got: %s\n" % e.value[2],
3790                                         noiselevel=-1)
3791                                 writemsg("!!! Expected: %s\n" % e.value[3],
3792                                         noiselevel=-1)
3793                                 rval = 1
3794                         if rval != os.EX_OK:
3795                                 pkg_path = bintree.getname(pkg.cpv)
3796                                 head, tail = os.path.split(pkg_path)
3797                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3798                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3799                                         noiselevel=-1)
3800                 finally:
3801                         sys.stdout = stdout_orig
3802                         sys.stderr = stderr_orig
3803                         if log_file is not None:
3804                                 log_file.close()
3805
3806                 self.returncode = rval
3807                 self.wait()
3808
3809 class BinpkgPrefetcher(CompositeTask):
3810
3811         __slots__ = ("pkg",) + \
3812                 ("pkg_path", "_bintree",)
3813
3814         def _start(self):
3815                 self._bintree = self.pkg.root_config.trees["bintree"]
3816                 fetcher = BinpkgFetcher(background=self.background,
3817                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3818                         scheduler=self.scheduler)
3819                 self.pkg_path = fetcher.pkg_path
3820                 self._start_task(fetcher, self._fetcher_exit)
3821
3822         def _fetcher_exit(self, fetcher):
3823
3824                 if self._default_exit(fetcher) != os.EX_OK:
3825                         self.wait()
3826                         return
3827
3828                 verifier = BinpkgVerifier(background=self.background,
3829                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3830                 self._start_task(verifier, self._verifier_exit)
3831
3832         def _verifier_exit(self, verifier):
3833                 if self._default_exit(verifier) != os.EX_OK:
3834                         self.wait()
3835                         return
3836
3837                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3838
3839                 self._current_task = None
3840                 self.returncode = os.EX_OK
3841                 self.wait()
3842
3843 class BinpkgExtractorAsync(SpawnProcess):
3844
3845         __slots__ = ("image_dir", "pkg", "pkg_path")
3846
3847         _shell_binary = portage.const.BASH_BINARY
3848
3849         def _start(self):
3850                 self.args = [self._shell_binary, "-c",
3851                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3852                         (portage._shell_quote(self.pkg_path),
3853                         portage._shell_quote(self.image_dir))]
3854
3855                 self.env = self.pkg.root_config.settings.environ()
3856                 SpawnProcess._start(self)
3857
3858 class MergeListItem(CompositeTask):
3859
3860         """
3861         TODO: For parallel scheduling, everything here needs asynchronous
3862         execution support (start, poll, and wait methods).
3863         """
3864
3865         __slots__ = ("args_set",
3866                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3867                 "find_blockers", "logger", "mtimedb", "pkg",
3868                 "pkg_count", "pkg_to_replace", "prefetcher",
3869                 "settings", "statusMessage", "world_atom") + \
3870                 ("_install_task",)
3871
3872         def _start(self):
3873
3874                 pkg = self.pkg
3875                 build_opts = self.build_opts
3876
3877                 if pkg.installed:
3878                         # uninstall, executed by self.merge()
3879                         self.returncode = os.EX_OK
3880                         self.wait()
3881                         return
3882
3883                 args_set = self.args_set
3884                 find_blockers = self.find_blockers
3885                 logger = self.logger
3886                 mtimedb = self.mtimedb
3887                 pkg_count = self.pkg_count
3888                 scheduler = self.scheduler
3889                 settings = self.settings
3890                 world_atom = self.world_atom
3891                 ldpath_mtimes = mtimedb["ldpath"]
3892
3893                 action_desc = "Emerging"
3894                 preposition = "for"
3895                 if pkg.type_name == "binary":
3896                         action_desc += " binary"
3897
3898                 if build_opts.fetchonly:
3899                         action_desc = "Fetching"
3900
3901                 msg = "%s (%s of %s) %s" % \
3902                         (action_desc,
3903                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3904                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3905                         colorize("GOOD", pkg.cpv))
3906
3907                 portdb = pkg.root_config.trees["porttree"].dbapi
3908                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3909                 if portdir_repo_name:
3910                         pkg_repo_name = pkg.metadata.get("repository")
3911                         if pkg_repo_name != portdir_repo_name:
3912                                 if not pkg_repo_name:
3913                                         pkg_repo_name = "unknown repo"
3914                                 msg += " from %s" % pkg_repo_name
3915
3916                 if pkg.root != "/":
3917                         msg += " %s %s" % (preposition, pkg.root)
3918
3919                 if not build_opts.pretend:
3920                         self.statusMessage(msg)
3921                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3922                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3923
3924                 if pkg.type_name == "ebuild":
3925
3926                         build = EbuildBuild(args_set=args_set,
3927                                 background=self.background,
3928                                 config_pool=self.config_pool,
3929                                 find_blockers=find_blockers,
3930                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3931                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3932                                 prefetcher=self.prefetcher, scheduler=scheduler,
3933                                 settings=settings, world_atom=world_atom)
3934
3935                         self._install_task = build
3936                         self._start_task(build, self._default_final_exit)
3937                         return
3938
3939                 elif pkg.type_name == "binary":
3940
3941                         binpkg = Binpkg(background=self.background,
3942                                 find_blockers=find_blockers,
3943                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3944                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3945                                 prefetcher=self.prefetcher, settings=settings,
3946                                 scheduler=scheduler, world_atom=world_atom)
3947
3948                         self._install_task = binpkg
3949                         self._start_task(binpkg, self._default_final_exit)
3950                         return
3951
3952         def _poll(self):
3953                 self._install_task.poll()
3954                 return self.returncode
3955
3956         def _wait(self):
3957                 self._install_task.wait()
3958                 return self.returncode
3959
3960         def merge(self):
3961
3962                 pkg = self.pkg
3963                 build_opts = self.build_opts
3964                 find_blockers = self.find_blockers
3965                 logger = self.logger
3966                 mtimedb = self.mtimedb
3967                 pkg_count = self.pkg_count
3968                 prefetcher = self.prefetcher
3969                 scheduler = self.scheduler
3970                 settings = self.settings
3971                 world_atom = self.world_atom
3972                 ldpath_mtimes = mtimedb["ldpath"]
3973
3974                 if pkg.installed:
3975                         if not (build_opts.buildpkgonly or \
3976                                 build_opts.fetchonly or build_opts.pretend):
3977
3978                                 uninstall = PackageUninstall(background=self.background,
3979                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3980                                         pkg=pkg, scheduler=scheduler, settings=settings)
3981
3982                                 uninstall.start()
3983                                 retval = uninstall.wait()
3984                                 if retval != os.EX_OK:
3985                                         return retval
3986                         return os.EX_OK
3987
3988                 if build_opts.fetchonly or \
3989                         build_opts.buildpkgonly:
3990                         return self.returncode
3991
3992                 retval = self._install_task.install()
3993                 return retval
3994
3995 class PackageMerge(AsynchronousTask):
3996         """
3997         TODO: Implement asynchronous merge so that the scheduler can
3998         run while a merge is executing.
3999         """
4000
4001         __slots__ = ("merge",)
4002
4003         def _start(self):
4004
4005                 pkg = self.merge.pkg
4006                 pkg_count = self.merge.pkg_count
4007
4008                 if pkg.installed:
4009                         action_desc = "Uninstalling"
4010                         preposition = "from"
4011                 else:
4012                         action_desc = "Installing"
4013                         preposition = "to"
4014
4015                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4016
4017                 if pkg.root != "/":
4018                         msg += " %s %s" % (preposition, pkg.root)
4019
4020                 if not self.merge.build_opts.fetchonly and \
4021                         not self.merge.build_opts.pretend and \
4022                         not self.merge.build_opts.buildpkgonly:
4023                         self.merge.statusMessage(msg)
4024
4025                 self.returncode = self.merge.merge()
4026                 self.wait()
4027
4028 class DependencyArg(object):
4029         def __init__(self, arg=None, root_config=None):
4030                 self.arg = arg
4031                 self.root_config = root_config
4032
4033         def __str__(self):
4034                 return str(self.arg)
4035
4036 class AtomArg(DependencyArg):
4037         def __init__(self, atom=None, **kwargs):
4038                 DependencyArg.__init__(self, **kwargs)
4039                 self.atom = atom
4040                 if not isinstance(self.atom, portage.dep.Atom):
4041                         self.atom = portage.dep.Atom(self.atom)
4042                 self.set = (self.atom, )
4043
4044 class PackageArg(DependencyArg):
4045         def __init__(self, package=None, **kwargs):
4046                 DependencyArg.__init__(self, **kwargs)
4047                 self.package = package
4048                 self.atom = portage.dep.Atom("=" + package.cpv)
4049                 self.set = (self.atom, )
4050
4051 class SetArg(DependencyArg):
4052         def __init__(self, set=None, **kwargs):
4053                 DependencyArg.__init__(self, **kwargs)
4054                 self.set = set
4055                 self.name = self.arg[len(SETPREFIX):]
4056
4057 class Dependency(SlotObject):
4058         __slots__ = ("atom", "blocker", "depth",
4059                 "parent", "onlydeps", "priority", "root")
4060         def __init__(self, **kwargs):
4061                 SlotObject.__init__(self, **kwargs)
4062                 if self.priority is None:
4063                         self.priority = DepPriority()
4064                 if self.depth is None:
4065                         self.depth = 0
4066
4067 class BlockerCache(portage.cache.mappings.MutableMapping):
4068         """This caches blockers of installed packages so that dep_check does not
4069         have to be done for every single installed package on every invocation of
4070         emerge.  The cache is invalidated whenever it is detected that something
4071         has changed that might alter the results of dep_check() calls:
4072                 1) the set of installed packages (including COUNTER) has changed
4073                 2) the old-style virtuals have changed
4074         """
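             # Hedged usage sketch, mirroring BlockerDB.findInstalledBlockers() below
             # (variable names are illustrative):
             #     cache = BlockerCache(myroot, vardb)
             #     data = cache.get(cpv)  # BlockerData instance or None
             #     cache[cpv] = cache.BlockerData(counter, blocker_atoms)
             #     cache.flush()  # persisted only when enough entries changed and secpass >= 2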
4075
4076         # Number of uncached packages to trigger cache update, since
4077         # it's wasteful to update it for every vdb change.
4078         _cache_threshold = 5
4079
4080         class BlockerData(object):
4081
4082                 __slots__ = ("__weakref__", "atoms", "counter")
4083
4084                 def __init__(self, counter, atoms):
4085                         self.counter = counter
4086                         self.atoms = atoms
4087
4088         def __init__(self, myroot, vardb):
4089                 self._vardb = vardb
4090                 self._virtuals = vardb.settings.getvirtuals()
4091                 self._cache_filename = os.path.join(myroot,
4092                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4093                 self._cache_version = "1"
4094                 self._cache_data = None
4095                 self._modified = set()
4096                 self._load()
4097
4098         def _load(self):
4099                 try:
4100                         f = open(self._cache_filename, mode='rb')
4101                         mypickle = pickle.Unpickler(f)
4102                         try:
4103                                 mypickle.find_global = None
4104                         except AttributeError:
4105                                 # TODO: If py3k, override Unpickler.find_class().
4106                                 pass
4107                         self._cache_data = mypickle.load()
4108                         f.close()
4109                         del f
4110                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4111                         if isinstance(e, pickle.UnpicklingError):
4112                                 writemsg("!!! Error loading '%s': %s\n" % \
4113                                         (self._cache_filename, str(e)), noiselevel=-1)
4114                         del e
4115
4116                 cache_valid = self._cache_data and \
4117                         isinstance(self._cache_data, dict) and \
4118                         self._cache_data.get("version") == self._cache_version and \
4119                         isinstance(self._cache_data.get("blockers"), dict)
4120                 if cache_valid:
4121                         # Validate all the atoms and counters so that
4122                         # corruption is detected as soon as possible.
4123                         invalid_items = set()
4124                         for k, v in self._cache_data["blockers"].iteritems():
4125                                 if not isinstance(k, basestring):
4126                                         invalid_items.add(k)
4127                                         continue
4128                                 try:
4129                                         if portage.catpkgsplit(k) is None:
4130                                                 invalid_items.add(k)
4131                                                 continue
4132                                 except portage.exception.InvalidData:
4133                                         invalid_items.add(k)
4134                                         continue
4135                                 if not isinstance(v, tuple) or \
4136                                         len(v) != 2:
4137                                         invalid_items.add(k)
4138                                         continue
4139                                 counter, atoms = v
4140                                 if not isinstance(counter, (int, long)):
4141                                         invalid_items.add(k)
4142                                         continue
4143                                 if not isinstance(atoms, (list, tuple)):
4144                                         invalid_items.add(k)
4145                                         continue
4146                                 invalid_atom = False
4147                                 for atom in atoms:
4148                                         if not isinstance(atom, basestring):
4149                                                 invalid_atom = True
4150                                                 break
4151                                         if atom[:1] != "!" or \
4152                                                 not portage.isvalidatom(
4153                                                 atom, allow_blockers=True):
4154                                                 invalid_atom = True
4155                                                 break
4156                                 if invalid_atom:
4157                                         invalid_items.add(k)
4158                                         continue
4159
4160                         for k in invalid_items:
4161                                 del self._cache_data["blockers"][k]
4162                         if not self._cache_data["blockers"]:
4163                                 cache_valid = False
4164
4165                 if not cache_valid:
4166                         self._cache_data = {"version":self._cache_version}
4167                         self._cache_data["blockers"] = {}
4168                         self._cache_data["virtuals"] = self._virtuals
4169                 self._modified.clear()
4170
4171         def flush(self):
4172                 """If the current user has permission and the internal blocker cache
4173                 has been updated, save it to disk and mark it unmodified.  This is called
4174                 by emerge after it has processed blockers for all installed packages.
4175                 Currently, the cache is only written if the user has superuser
4176                 privileges (since that's required to obtain a lock), but all users
4177                 have read access and benefit from faster blocker lookups (as long as
4178                 the entire cache is still valid).  The cache is stored as a pickled
4179                 dict object with the following format:
4180
4181                 {
4182                         version : "1",
4183                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4184                         "virtuals" : vardb.settings.getvirtuals()
4185                 }
4186                 """
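                     # Illustrative on-disk contents in the format documented above
                     # (package names and counter values are hypothetical):
                     #     {"version": "1",
                     #      "blockers": {"app-misc/foo-1.0": (42L, ("!app-misc/bar",))},
                     #      "virtuals": vardb.settings.getvirtuals()}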
4187                 if len(self._modified) >= self._cache_threshold and \
4188                         secpass >= 2:
4189                         try:
4190                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4191                                 pickle.dump(self._cache_data, f, protocol=2)
4192                                 f.close()
4193                                 portage.util.apply_secpass_permissions(
4194                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4195                         except (IOError, OSError), e:
4196                                 pass
4197                         self._modified.clear()
4198
4199         def __setitem__(self, cpv, blocker_data):
4200                 """
4201                 Update the cache and mark it as modified for a future call to
4202                 self.flush().
4203
4204                 @param cpv: Package for which to cache blockers.
4205                 @type cpv: String
4206                 @param blocker_data: An object with counter and atoms attributes.
4207                 @type blocker_data: BlockerData
4208                 """
4209                 self._cache_data["blockers"][cpv] = \
4210                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4211                 self._modified.add(cpv)
4212
4213         def __iter__(self):
4214                 if self._cache_data is None:
4215                         # triggered by python-trace
4216                         return iter([])
4217                 return iter(self._cache_data["blockers"])
4218
4219         def __delitem__(self, cpv):
4220                 del self._cache_data["blockers"][cpv]
4221
4222         def __getitem__(self, cpv):
4223                 """
4224                 @rtype: BlockerData
4225                 @returns: An object with counter and atoms attributes.
4226                 """
4227                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4228
4229 class BlockerDB(object):
4230
4231         def __init__(self, root_config):
4232                 self._root_config = root_config
4233                 self._vartree = root_config.trees["vartree"]
4234                 self._portdb = root_config.trees["porttree"].dbapi
4235
4236                 self._dep_check_trees = None
4237                 self._fake_vartree = None
4238
4239         def _get_fake_vartree(self, acquire_lock=0):
4240                 fake_vartree = self._fake_vartree
4241                 if fake_vartree is None:
4242                         fake_vartree = FakeVartree(self._root_config,
4243                                 acquire_lock=acquire_lock)
4244                         self._fake_vartree = fake_vartree
4245                         self._dep_check_trees = { self._vartree.root : {
4246                                 "porttree"    :  fake_vartree,
4247                                 "vartree"     :  fake_vartree,
4248                         }}
4249                 else:
4250                         fake_vartree.sync(acquire_lock=acquire_lock)
4251                 return fake_vartree
4252
4253         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
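                     # Returns the set of installed packages that block new_pkg or are
                     # blocked by it, refreshing the persistent BlockerCache along the way.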
4254                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4255                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4256                 settings = self._vartree.settings
4257                 stale_cache = set(blocker_cache)
4258                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4259                 dep_check_trees = self._dep_check_trees
4260                 vardb = fake_vartree.dbapi
4261                 installed_pkgs = list(vardb)
4262
4263                 for inst_pkg in installed_pkgs:
4264                         stale_cache.discard(inst_pkg.cpv)
4265                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4266                         if cached_blockers is not None and \
4267                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4268                                 cached_blockers = None
4269                         if cached_blockers is not None:
4270                                 blocker_atoms = cached_blockers.atoms
4271                         else:
4272                                 # Use aux_get() to trigger FakeVartree global
4273                                 # updates on *DEPEND when appropriate.
4274                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4275                                 try:
4276                                         portage.dep._dep_check_strict = False
4277                                         success, atoms = portage.dep_check(depstr,
4278                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4279                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4280                                 finally:
4281                                         portage.dep._dep_check_strict = True
4282                                 if not success:
4283                                         pkg_location = os.path.join(inst_pkg.root,
4284                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4285                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4286                                                 (pkg_location, atoms), noiselevel=-1)
4287                                         continue
4288
4289                                 blocker_atoms = [atom for atom in atoms \
4290                                         if atom.startswith("!")]
4291                                 blocker_atoms.sort()
4292                                 counter = long(inst_pkg.metadata["COUNTER"])
4293                                 blocker_cache[inst_pkg.cpv] = \
4294                                         blocker_cache.BlockerData(counter, blocker_atoms)
4295                 for cpv in stale_cache:
4296                         del blocker_cache[cpv]
4297                 blocker_cache.flush()
4298
4299                 blocker_parents = digraph()
4300                 blocker_atoms = []
4301                 for pkg in installed_pkgs:
4302                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4303                                 blocker_atom = blocker_atom.lstrip("!")
4304                                 blocker_atoms.append(blocker_atom)
4305                                 blocker_parents.add(blocker_atom, pkg)
4306
4307                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4308                 blocking_pkgs = set()
4309                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4310                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4311
4312                 # Check for blockers in the other direction.
4313                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4314                 try:
4315                         portage.dep._dep_check_strict = False
4316                         success, atoms = portage.dep_check(depstr,
4317                                 vardb, settings, myuse=new_pkg.use.enabled,
4318                                 trees=dep_check_trees, myroot=new_pkg.root)
4319                 finally:
4320                         portage.dep._dep_check_strict = True
4321                 if not success:
4322                         # We should never get this far with invalid deps.
4323                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4324                         assert False
4325
4326                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4327                         if atom[:1] == "!"]
4328                 if blocker_atoms:
4329                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4330                         for inst_pkg in installed_pkgs:
4331                                 try:
4332                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4333                                 except (portage.exception.InvalidDependString, StopIteration):
4334                                         continue
4335                                 blocking_pkgs.add(inst_pkg)
4336
4337                 return blocking_pkgs
4338
4339 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4340
4341         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4342                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4343         p_type, p_root, p_key, p_status = parent_node
4344         msg = []
4345         if p_status == "nomerge":
4346                 category, pf = portage.catsplit(p_key)
4347                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4348                 msg.append("Portage is unable to process the dependencies of the ")
4349                 msg.append("'%s' package. " % p_key)
4350                 msg.append("In order to correct this problem, the package ")
4351                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4352                 msg.append("As a temporary workaround, the --nodeps option can ")
4353                 msg.append("be used to ignore all dependencies.  For reference, ")
4354                 msg.append("the problematic dependencies can be found in the ")
4355                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4356         else:
4357                 msg.append("This package can not be installed. ")
4358                 msg.append("Please notify the '%s' package maintainer " % p_key)
4359                 msg.append("about this problem.")
4360
4361         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4362         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4363
4364 class PackageVirtualDbapi(portage.dbapi):
4365         """
4366         A dbapi-like interface class that represents the state of the installed
4367         package database as new packages are installed, replacing any packages
4368         that previously existed in the same slot. The main difference between
4369         this class and fakedbapi is that this one uses Package instances
4370         internally (passed in via cpv_inject() and cpv_remove() calls).
4371         """
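             # Hedged sketch of the slot-replacement behavior (pkg_a and pkg_b are
             # hypothetical Package instances sharing the same slot_atom):
             #     fakedb = PackageVirtualDbapi(settings)
             #     fakedb.cpv_inject(pkg_a)
             #     fakedb.cpv_inject(pkg_b)  # pkg_a is removed from the slot first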
4372         def __init__(self, settings):
4373                 portage.dbapi.__init__(self)
4374                 self.settings = settings
4375                 self._match_cache = {}
4376                 self._cp_map = {}
4377                 self._cpv_map = {}
4378
4379         def clear(self):
4380                 """
4381                 Remove all packages.
4382                 """
4383                 if self._cpv_map:
4384                         self._clear_cache()
4385                         self._cp_map.clear()
4386                         self._cpv_map.clear()
4387
4388         def copy(self):
4389                 obj = PackageVirtualDbapi(self.settings)
4390                 obj._match_cache = self._match_cache.copy()
4391                 obj._cp_map = self._cp_map.copy()
4392                 for k, v in obj._cp_map.iteritems():
4393                         obj._cp_map[k] = v[:]
4394                 obj._cpv_map = self._cpv_map.copy()
4395                 return obj
4396
4397         def __iter__(self):
4398                 return self._cpv_map.itervalues()
4399
4400         def __contains__(self, item):
4401                 existing = self._cpv_map.get(item.cpv)
4402                 if existing is not None and \
4403                         existing == item:
4404                         return True
4405                 return False
4406
4407         def get(self, item, default=None):
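                     # 'item' may be a Package instance or a (type_name, root, cpv,
                     # operation) tuple; in both cases the lookup is by cpv.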
4408                 cpv = getattr(item, "cpv", None)
4409                 if cpv is None:
4410                         if len(item) != 4:
4411                                 return default
4412                         type_name, root, cpv, operation = item
4413
4414                 existing = self._cpv_map.get(cpv)
4415                 if existing is not None and \
4416                         existing == item:
4417                         return existing
4418                 return default
4419
4420         def match_pkgs(self, atom):
4421                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4422
4423         def _clear_cache(self):
4424                 if self._categories is not None:
4425                         self._categories = None
4426                 if self._match_cache:
4427                         self._match_cache = {}
4428
4429         def match(self, origdep, use_cache=1):
4430                 result = self._match_cache.get(origdep)
4431                 if result is not None:
4432                         return result[:]
4433                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4434                 self._match_cache[origdep] = result
4435                 return result[:]
4436
4437         def cpv_exists(self, cpv):
4438                 return cpv in self._cpv_map
4439
4440         def cp_list(self, mycp, use_cache=1):
4441                 cachelist = self._match_cache.get(mycp)
4442                 # cp_list() doesn't expand old-style virtuals
4443                 if cachelist and cachelist[0].startswith(mycp):
4444                         return cachelist[:]
4445                 cpv_list = self._cp_map.get(mycp)
4446                 if cpv_list is None:
4447                         cpv_list = []
4448                 else:
4449                         cpv_list = [pkg.cpv for pkg in cpv_list]
4450                 self._cpv_sort_ascending(cpv_list)
4451                 if not (not cpv_list and mycp.startswith("virtual/")):
4452                         self._match_cache[mycp] = cpv_list
4453                 return cpv_list[:]
4454
4455         def cp_all(self):
4456                 return list(self._cp_map)
4457
4458         def cpv_all(self):
4459                 return list(self._cpv_map)
4460
4461         def cpv_inject(self, pkg):
4462                 cp_list = self._cp_map.get(pkg.cp)
4463                 if cp_list is None:
4464                         cp_list = []
4465                         self._cp_map[pkg.cp] = cp_list
4466                 e_pkg = self._cpv_map.get(pkg.cpv)
4467                 if e_pkg is not None:
4468                         if e_pkg == pkg:
4469                                 return
4470                         self.cpv_remove(e_pkg)
4471                 for e_pkg in cp_list:
4472                         if e_pkg.slot_atom == pkg.slot_atom:
4473                                 if e_pkg == pkg:
4474                                         return
4475                                 self.cpv_remove(e_pkg)
4476                                 break
4477                 cp_list.append(pkg)
4478                 self._cpv_map[pkg.cpv] = pkg
4479                 self._clear_cache()
4480
4481         def cpv_remove(self, pkg):
4482                 old_pkg = self._cpv_map.get(pkg.cpv)
4483                 if old_pkg != pkg:
4484                         raise KeyError(pkg)
4485                 self._cp_map[pkg.cp].remove(pkg)
4486                 del self._cpv_map[pkg.cpv]
4487                 self._clear_cache()
4488
4489         def aux_get(self, cpv, wants):
4490                 metadata = self._cpv_map[cpv].metadata
4491                 return [metadata.get(x, "") for x in wants]
4492
4493         def aux_update(self, cpv, values):
4494                 self._cpv_map[cpv].metadata.update(values)
4495                 self._clear_cache()
4496
4497 class depgraph(object):
4498
4499         pkg_tree_map = RootConfig.pkg_tree_map
4500
4501         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4502
4503         def __init__(self, settings, trees, myopts, myparams, spinner):
4504                 self.settings = settings
4505                 self.target_root = settings["ROOT"]
4506                 self.myopts = myopts
4507                 self.myparams = myparams
4508                 self.edebug = 0
4509                 if settings.get("PORTAGE_DEBUG", "") == "1":
4510                         self.edebug = 1
4511                 self.spinner = spinner
4512                 self._running_root = trees["/"]["root_config"]
4513                 self._opts_no_restart = Scheduler._opts_no_restart
4514                 self.pkgsettings = {}
4515                 # Maps slot atom to package for each Package added to the graph.
4516                 self._slot_pkg_map = {}
4517                 # Maps nodes to the reasons they were selected for reinstallation.
4518                 self._reinstall_nodes = {}
4519                 self.mydbapi = {}
4520                 self.trees = {}
4521                 self._trees_orig = trees
4522                 self.roots = {}
4523                 # Contains a filtered view of preferred packages that are selected
4524                 # from available repositories.
4525                 self._filtered_trees = {}
4526                 # Contains installed packages and new packages that have been added
4527                 # to the graph.
4528                 self._graph_trees = {}
4529                 # All Package instances
4530                 self._pkg_cache = {}
4531                 for myroot in trees:
4532                         self.trees[myroot] = {}
4533                         # Create a RootConfig instance that references
4534                         # the FakeVartree instead of the real one.
4535                         self.roots[myroot] = RootConfig(
4536                                 trees[myroot]["vartree"].settings,
4537                                 self.trees[myroot],
4538                                 trees[myroot]["root_config"].setconfig)
4539                         for tree in ("porttree", "bintree"):
4540                                 self.trees[myroot][tree] = trees[myroot][tree]
4541                         self.trees[myroot]["vartree"] = \
4542                                 FakeVartree(trees[myroot]["root_config"],
4543                                         pkg_cache=self._pkg_cache)
4544                         self.pkgsettings[myroot] = portage.config(
4545                                 clone=self.trees[myroot]["vartree"].settings)
4546                         self._slot_pkg_map[myroot] = {}
4547                         vardb = self.trees[myroot]["vartree"].dbapi
4548                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4549                                 "--buildpkgonly" not in self.myopts
4550                         # This fakedbapi instance will model the state that the vdb will
4551                         # have after new packages have been installed.
4552                         fakedb = PackageVirtualDbapi(vardb.settings)
4553                         if preload_installed_pkgs:
4554                                 for pkg in vardb:
4555                                         self.spinner.update()
4556                                         # This triggers metadata updates via FakeVartree.
4557                                         vardb.aux_get(pkg.cpv, [])
4558                                         fakedb.cpv_inject(pkg)
4559
4560                         # Now that the vardb state is cached in our FakeVartree,
4561                         # we won't be needing the real vartree cache for a while.
4562                         # To make some room on the heap, clear the vardbapi
4563                         # caches.
4564                         trees[myroot]["vartree"].dbapi._clear_cache()
4565                         gc.collect()
4566
4567                         self.mydbapi[myroot] = fakedb
4568                         def graph_tree():
4569                                 pass
4570                         graph_tree.dbapi = fakedb
4571                         self._graph_trees[myroot] = {}
4572                         self._filtered_trees[myroot] = {}
4573                         # Substitute the graph tree for the vartree in dep_check() since we
4574                         # want atom selections to be consistent with package selections that
4575                         # have already been made.
4576                         self._graph_trees[myroot]["porttree"]   = graph_tree
4577                         self._graph_trees[myroot]["vartree"]    = graph_tree
4578                         def filtered_tree():
4579                                 pass
4580                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4581                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4582
4583                         # Passing in graph_tree as the vartree here could lead to better
4584                         # atom selections in some cases by causing atoms for packages that
4585                         # have been added to the graph to be preferred over other choices.
4586                         # However, it can trigger atom selections that result in
4587                         # unresolvable direct circular dependencies. For example, this
4588                         # happens with gwydion-dylan which depends on either itself or
4589                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4590                         # gwydion-dylan-bin needs to be selected in order to avoid
4591                         # an unresolvable direct circular dependency.
4592                         #
4593                         # To solve the problem described above, pass in "graph_db" so that
4594                         # packages that have been added to the graph are distinguishable
4595                         # from other available packages and installed packages. Also, pass
4596                         # the parent package into self._select_atoms() calls so that
4597                         # unresolvable direct circular dependencies can be detected and
4598                         # avoided when possible.
4599                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4600                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4601
4602                         dbs = []
4603                         portdb = self.trees[myroot]["porttree"].dbapi
4604                         bindb  = self.trees[myroot]["bintree"].dbapi
4605                         vardb  = self.trees[myroot]["vartree"].dbapi
4606                         #               (db, pkg_type, built, installed, db_keys)
4607                         if "--usepkgonly" not in self.myopts:
4608                                 db_keys = list(portdb._aux_cache_keys)
4609                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4610                         if "--usepkg" in self.myopts:
4611                                 db_keys = list(bindb._aux_cache_keys)
4612                                 dbs.append((bindb,  "binary", True, False, db_keys))
4613                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4614                         dbs.append((vardb, "installed", True, True, db_keys))
4615                         self._filtered_trees[myroot]["dbs"] = dbs
4616                         if "--usepkg" in self.myopts:
4617                                 self.trees[myroot]["bintree"].populate(
4618                                         "--getbinpkg" in self.myopts,
4619                                         "--getbinpkgonly" in self.myopts)
4620                 del trees
4621
4622                 self.digraph=portage.digraph()
4623                 # contains all sets added to the graph
4624                 self._sets = {}
4625                 # contains atoms given as arguments
4626                 self._sets["args"] = InternalPackageSet()
4627                 # contains all atoms from all sets added to the graph, including
4628                 # atoms given as arguments
4629                 self._set_atoms = InternalPackageSet()
4630                 self._atom_arg_map = {}
4631                 # contains all nodes pulled in by self._set_atoms
4632                 self._set_nodes = set()
4633                 # Contains only Blocker -> Uninstall edges
4634                 self._blocker_uninstalls = digraph()
4635                 # Contains only Package -> Blocker edges
4636                 self._blocker_parents = digraph()
4637                 # Contains only irrelevant Package -> Blocker edges
4638                 self._irrelevant_blockers = digraph()
4639                 # Contains only unsolvable Package -> Blocker edges
4640                 self._unsolvable_blockers = digraph()
4641                 # Contains all Blocker -> Blocked Package edges
4642                 self._blocked_pkgs = digraph()
4643                 # Contains world packages that have been protected from
4644                 # uninstallation but may not have been added to the graph
4645                 # if the graph is not complete yet.
4646                 self._blocked_world_pkgs = {}
4647                 self._slot_collision_info = {}
4648                 # Slot collision nodes are not allowed to block other packages since
4649                 # blocker validation is only able to account for one package per slot.
4650                 self._slot_collision_nodes = set()
4651                 self._parent_atoms = {}
4652                 self._slot_conflict_parent_atoms = set()
4653                 self._serialized_tasks_cache = None
4654                 self._scheduler_graph = None
4655                 self._displayed_list = None
4656                 self._pprovided_args = []
4657                 self._missing_args = []
4658                 self._masked_installed = set()
4659                 self._unsatisfied_deps_for_display = []
4660                 self._unsatisfied_blockers_for_display = None
4661                 self._circular_deps_for_display = None
4662                 self._dep_stack = []
4663                 self._unsatisfied_deps = []
4664                 self._initially_unsatisfied_deps = []
4665                 self._ignored_deps = []
4666                 self._required_set_names = set(["system", "world"])
4667                 self._select_atoms = self._select_atoms_highest_available
4668                 self._select_package = self._select_pkg_highest_available
4669                 self._highest_pkg_cache = {}
4670
4671         def _show_slot_collision_notice(self):
4672                 """Show an informational message advising the user to mask one of
4673                 the packages. In some cases it may be possible to resolve this
4674                 automatically, but support for backtracking (removal of nodes that have
4675                 already been selected) will be required in order to handle all possible
4676                 cases.
4677                 """
4678
4679                 if not self._slot_collision_info:
4680                         return
4681
4682                 self._show_merge_list()
4683
4684                 msg = []
4685                 msg.append("\n!!! Multiple package instances within a single " + \
4686                         "package slot have been pulled\n")
4687                 msg.append("!!! into the dependency graph, resulting" + \
4688                         " in a slot conflict:\n\n")
4689                 indent = "  "
4690                 # Max number of parents shown, to avoid flooding the display.
4691                 max_parents = 3
4692                 explanation_columns = 70
4693                 explanations = 0
4694                 for (slot_atom, root), slot_nodes \
4695                         in self._slot_collision_info.iteritems():
4696                         msg.append(str(slot_atom))
4697                         msg.append("\n\n")
4698
4699                         for node in slot_nodes:
4700                                 msg.append(indent)
4701                                 msg.append(str(node))
4702                                 parent_atoms = self._parent_atoms.get(node)
4703                                 if parent_atoms:
4704                                         pruned_list = set()
4705                                         # Prefer conflict atoms over others.
4706                                         for parent_atom in parent_atoms:
4707                                                 if len(pruned_list) >= max_parents:
4708                                                         break
4709                                                 if parent_atom in self._slot_conflict_parent_atoms:
4710                                                         pruned_list.add(parent_atom)
4711
4712                                         # If this package was pulled in by conflict atoms then
4713                                         # show those alone since those are the most interesting.
4714                                         if not pruned_list:
4715                                                 # When generating the pruned list, prefer instances
4716                                                 # of DependencyArg over instances of Package.
4717                                                 for parent_atom in parent_atoms:
4718                                                         if len(pruned_list) >= max_parents:
4719                                                                 break
4720                                                         parent, atom = parent_atom
4721                                                         if isinstance(parent, DependencyArg):
4722                                                                 pruned_list.add(parent_atom)
4723                                                 # Prefer Packages instances that themselves have been
4724                                                 # pulled into collision slots.
4725                                                 for parent_atom in parent_atoms:
4726                                                         if len(pruned_list) >= max_parents:
4727                                                                 break
4728                                                         parent, atom = parent_atom
4729                                                         if isinstance(parent, Package) and \
4730                                                                 (parent.slot_atom, parent.root) \
4731                                                                 in self._slot_collision_info:
4732                                                                 pruned_list.add(parent_atom)
4733                                                 for parent_atom in parent_atoms:
4734                                                         if len(pruned_list) >= max_parents:
4735                                                                 break
4736                                                         pruned_list.add(parent_atom)
4737                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4738                                         parent_atoms = pruned_list
4739                                         msg.append(" pulled in by\n")
4740                                         for parent_atom in parent_atoms:
4741                                                 parent, atom = parent_atom
4742                                                 msg.append(2*indent)
4743                                                 if isinstance(parent,
4744                                                         (PackageArg, AtomArg)):
4745                                                         # For PackageArg and AtomArg types, it's
4746                                                         # redundant to display the atom attribute.
4747                                                         msg.append(str(parent))
4748                                                 else:
4749                                                         # Display the specific atom from SetArg or
4750                                                         # Package types.
4751                                                         msg.append("%s required by %s" % (atom, parent))
4752                                                 msg.append("\n")
4753                                         if omitted_parents:
4754                                                 msg.append(2*indent)
4755                                                 msg.append("(and %d more)\n" % omitted_parents)
4756                                 else:
4757                                         msg.append(" (no parents)\n")
4758                                 msg.append("\n")
4759                         explanation = self._slot_conflict_explanation(slot_nodes)
4760                         if explanation:
4761                                 explanations += 1
4762                                 msg.append(indent + "Explanation:\n\n")
4763                                 for line in textwrap.wrap(explanation, explanation_columns):
4764                                         msg.append(2*indent + line + "\n")
4765                                 msg.append("\n")
4766                 msg.append("\n")
4767                 sys.stderr.write("".join(msg))
4768                 sys.stderr.flush()
4769
4770                 explanations_for_all = explanations == len(self._slot_collision_info)
4771
4772                 if explanations_for_all or "--quiet" in self.myopts:
4773                         return
4774
4775                 msg = []
4776                 msg.append("It may be possible to solve this problem ")
4777                 msg.append("by using package.mask to prevent one of ")
4778                 msg.append("those packages from being selected. ")
4779                 msg.append("However, it is also possible that conflicting ")
4780                 msg.append("dependencies exist such that they are impossible to ")
4781                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4782                 msg.append("the dependencies of two different packages, then those ")
4783                 msg.append("packages cannot be installed simultaneously.")
4784
4785                 from formatter import AbstractFormatter, DumbWriter
4786                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4787                 for x in msg:
4788                         f.add_flowing_data(x)
4789                 f.end_paragraph(1)
4790
4791                 msg = []
4792                 msg.append("For more information, see the MASKED PACKAGES ")
4793                 msg.append("section in the emerge man page or refer ")
4794                 msg.append("to the Gentoo Handbook.")
4795                 for x in msg:
4796                         f.add_flowing_data(x)
4797                 f.end_paragraph(1)
4798                 f.writer.flush()
4799
4800         def _slot_conflict_explanation(self, slot_nodes):
4801                 """
4802                 When a slot conflict occurs due to USE deps, there are a few
4803                 different cases to consider:
4804
4805                 1) New USE are correctly set but --newuse wasn't requested so an
4806                    installed package with incorrect USE happened to get pulled
4807                    into the graph before the new one.
4808
4809                 2) New USE are incorrectly set but an installed package has correct
4810                    USE so it got pulled into the graph, and a new instance also got
4811                    pulled in due to --newuse or an upgrade.
4812
4813                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4814                    and multiple package instances got pulled into the same slot to
4815                    satisfy the conflicting deps.
4816
4817                 Currently, explanations and suggested courses of action are generated
4818                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4819                 """
4820
4821                 if len(slot_nodes) != 2:
4822                         # Suggestions are only implemented for
4823                         # conflicts between two packages.
4824                         return None
4825
4826                 all_conflict_atoms = self._slot_conflict_parent_atoms
4827                 matched_node = None
4828                 matched_atoms = None
4829                 unmatched_node = None
4830                 for node in slot_nodes:
4831                         parent_atoms = self._parent_atoms.get(node)
4832                         if not parent_atoms:
4833                                 # Normally, there are always parent atoms. If there are
4834                                 # none then something unexpected is happening and there's
4835                                 # currently no suggestion for this case.
4836                                 return None
4837                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4838                         for parent_atom in conflict_atoms:
4839                                 parent, atom = parent_atom
4840                                 if not atom.use:
4841                                         # Suggestions are currently only implemented for cases
4842                                         # in which all conflict atoms have USE deps.
4843                                         return None
4844                         if conflict_atoms:
4845                                 if matched_node is not None:
4846                                         # If conflict atoms match multiple nodes
4847                                         # then there's no suggestion.
4848                                         return None
4849                                 matched_node = node
4850                                 matched_atoms = conflict_atoms
4851                         else:
4852                                 if unmatched_node is not None:
4853                                         # Neither node is matched by conflict atoms, and
4854                                         # there is no suggestion for this case.
4855                                         return None
4856                                 unmatched_node = node
4857
4858                 if matched_node is None or unmatched_node is None:
4859                         # This shouldn't happen.
4860                         return None
4861
4862                 if unmatched_node.installed and not matched_node.installed and \
4863                         unmatched_node.cpv == matched_node.cpv:
4864                         # If the conflicting packages are the same version then
4865                         # --newuse should be all that's needed. If they are different
4866                         # versions then there's some other problem.
4867                         return "New USE are correctly set, but --newuse wasn't" + \
4868                                 " requested, so an installed package with incorrect USE " + \
4869                                 "happened to get pulled into the dependency graph. " + \
4870                                 "In order to solve " + \
4871                                 "this, either specify the --newuse option or explicitly " + \
4872                                 "reinstall '%s'." % matched_node.slot_atom
4873
4874                 if matched_node.installed and not unmatched_node.installed:
4875                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4876                         explanation = ("New USE for '%s' are incorrectly set. " + \
4877                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4878                                 (matched_node.slot_atom, atoms[0])
4879                         if len(atoms) > 1:
4880                                 for atom in atoms[1:-1]:
4881                                         explanation += ", '%s'" % (atom,)
4882                                 if len(atoms) > 2:
4883                                         explanation += ","
4884                                 explanation += " and '%s'" % (atoms[-1],)
4885                         explanation += "."
4886                         return explanation
4887
4888                 return None
4889
4890         def _process_slot_conflicts(self):
4891                 """
4892                 Process slot conflict data to identify specific atoms which
4893                 lead to conflict. These atoms only match a subset of the
4894                 packages that have been pulled into a given slot.
4895                 """
4896                 for (slot_atom, root), slot_nodes \
4897                         in self._slot_collision_info.iteritems():
4898
4899                         all_parent_atoms = set()
4900                         for pkg in slot_nodes:
4901                                 parent_atoms = self._parent_atoms.get(pkg)
4902                                 if not parent_atoms:
4903                                         continue
4904                                 all_parent_atoms.update(parent_atoms)
4905
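                             # Check every collected parent atom against every package
                             # in the conflicting slot. Atoms that fail to match one of
                             # the packages are recorded as the atoms responsible for
                             # the conflict.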
4906                         for pkg in slot_nodes:
4907                                 parent_atoms = self._parent_atoms.get(pkg)
4908                                 if parent_atoms is None:
4909                                         parent_atoms = set()
4910                                         self._parent_atoms[pkg] = parent_atoms
4911                                 for parent_atom in all_parent_atoms:
4912                                         if parent_atom in parent_atoms:
4913                                                 continue
4914                                         # Use package set for matching since it will match via
4915                                         # PROVIDE when necessary, while match_from_list does not.
4916                                         parent, atom = parent_atom
4917                                         atom_set = InternalPackageSet(
4918                                                 initial_atoms=(atom,))
4919                                         if atom_set.findAtomForPackage(pkg):
4920                                                 parent_atoms.add(parent_atom)
4921                                         else:
4922                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4923
4924         def _reinstall_for_flags(self, forced_flags,
4925                 orig_use, orig_iuse, cur_use, cur_iuse):
4926                 """Return a set of flags that trigger reinstallation, or None if there
4927                 are no such flags."""
4928                 if "--newuse" in self.myopts:
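                             # With --newuse, reinstall if any flag has been added to or
                             # removed from IUSE (ignoring profile-forced flags), or if
                             # the enabled state of any IUSE flag differs between the
                             # old and new instances.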
4929                         flags = set(orig_iuse.symmetric_difference(
4930                                 cur_iuse).difference(forced_flags))
4931                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4932                                 cur_iuse.intersection(cur_use)))
4933                         if flags:
4934                                 return flags
4935                 elif "changed-use" == self.myopts.get("--reinstall"):
4936                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4937                                 cur_iuse.intersection(cur_use))
4938                         if flags:
4939                                 return flags
4940                 return None
4941
4942         def _create_graph(self, allow_unsatisfied=False):
4943                 dep_stack = self._dep_stack
4944                 while dep_stack:
4945                         self.spinner.update()
4946                         dep = dep_stack.pop()
4947                         if isinstance(dep, Package):
4948                                 if not self._add_pkg_deps(dep,
4949                                         allow_unsatisfied=allow_unsatisfied):
4950                                         return 0
4951                                 continue
4952                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4953                                 return 0
4954                 return 1
4955
4956         def _add_dep(self, dep, allow_unsatisfied=False):
4957                 debug = "--debug" in self.myopts
4958                 buildpkgonly = "--buildpkgonly" in self.myopts
4959                 nodeps = "--nodeps" in self.myopts
4960                 empty = "empty" in self.myparams
4961                 deep = "deep" in self.myparams
4962                 update = "--update" in self.myopts and dep.depth <= 1
4963                 if dep.blocker:
4964                         if not buildpkgonly and \
4965                                 not nodeps and \
4966                                 dep.parent not in self._slot_collision_nodes:
4967                                 if dep.parent.onlydeps:
4968                                         # It's safe to ignore blockers if the
4969                                         # parent is an --onlydeps node.
4970                                         return 1
4971                                 # The blocker applies to the root where
4972                                 # the parent is or will be installed.
4973                                 blocker = Blocker(atom=dep.atom,
4974                                         eapi=dep.parent.metadata["EAPI"],
4975                                         root=dep.parent.root)
4976                                 self._blocker_parents.add(blocker, dep.parent)
4977                         return 1
4978                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4979                         onlydeps=dep.onlydeps)
4980                 if not dep_pkg:
4981                         if dep.priority.optional:
4982                                 # This could be an unnecessary build-time dep
4983                                 # pulled in by --with-bdeps=y.
4984                                 return 1
4985                         if allow_unsatisfied:
4986                                 self._unsatisfied_deps.append(dep)
4987                                 return 1
4988                         self._unsatisfied_deps_for_display.append(
4989                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4990                         return 0
4991                 # In some cases, dep_check will return deps that shouldn't
4992                 # be processed any further, so they are identified and
4993                 # discarded here. Try to discard as few as possible since
4994                 # discarded dependencies reduce the amount of information
4995                 # available for optimization of merge order.
4996                 if dep.priority.satisfied and \
4997                         not dep_pkg.installed and \
4998                         not (existing_node or empty or deep or update):
4999                         myarg = None
5000                         if dep.root == self.target_root:
5001                                 try:
5002                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5003                                 except StopIteration:
5004                                         pass
5005                                 except portage.exception.InvalidDependString:
5006                                         if not dep_pkg.installed:
5007                                                 # This shouldn't happen since the package
5008                                                 # should have been masked.
5009                                                 raise
5010                         if not myarg:
5011                                 self._ignored_deps.append(dep)
5012                                 return 1
5013
5014                 if not self._add_pkg(dep_pkg, dep):
5015                         return 0
5016                 return 1
5017
5018         def _add_pkg(self, pkg, dep):
5019                 myparent = None
5020                 priority = None
5021                 depth = 0
5022                 if dep is None:
5023                         dep = Dependency()
5024                 else:
5025                         myparent = dep.parent
5026                         priority = dep.priority
5027                         depth = dep.depth
5028                 if priority is None:
5029                         priority = DepPriority()
5030                 """
5031                 Fills the digraph with nodes comprised of packages to merge.
5032                 mybigkey is the package spec of the package to merge.
5033                 myparent is the package depending on mybigkey ( or None )
5034                 addme = Should we add this package to the digraph or are we just looking at its deps?
5035                         Think --onlydeps, we need to ignore packages in that case.
5036                 #stuff to add:
5037                 #SLOT-aware emerge
5038                 #IUSE-aware emerge -> USE DEP aware depgraph
5039                 #"no downgrade" emerge
5040                 """
5041                 # Ensure that the dependencies of the same package
5042                 # are never processed more than once.
5043                 previously_added = pkg in self.digraph
5044
5045                 # select the correct /var database that we'll be checking against
5046                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5047                 pkgsettings = self.pkgsettings[pkg.root]
5048
5049                 arg_atoms = None
5050                 if True:
5051                         try:
5052                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5053                         except portage.exception.InvalidDependString, e:
5054                                 if not pkg.installed:
5055                                         show_invalid_depstring_notice(
5056                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5057                                         return 0
5058                                 del e
5059
5060                 if not pkg.onlydeps:
5061                         if not pkg.installed and \
5062                                 "empty" not in self.myparams and \
5063                                 vardbapi.match(pkg.slot_atom):
5064                                 # Increase the priority of dependencies on packages that
5065                                 # are being rebuilt. This optimizes merge order so that
5066                                 # dependencies are rebuilt/updated as soon as possible,
5067                                 # which is needed especially when emerge is called by
5068                                 # revdep-rebuild since dependencies may be affected by ABI
5069                                 # breakage that has rendered them useless. Don't adjust
5070                                 # priority here when in "empty" mode since all packages
5071                                 # are being merged in that case.
5072                                 priority.rebuild = True
5073
5074                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5075                         slot_collision = False
5076                         if existing_node:
5077                                 existing_node_matches = pkg.cpv == existing_node.cpv
5078                                 if existing_node_matches and \
5079                                         pkg != existing_node and \
5080                                         dep.atom is not None:
5081                                         # Use package set for matching since it will match via
5082                                         # PROVIDE when necessary, while match_from_list does not.
5083                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5084                                         if not atom_set.findAtomForPackage(existing_node):
5085                                                 existing_node_matches = False
5086                                 if existing_node_matches:
5087                                         # The existing node can be reused.
5088                                         if arg_atoms:
5089                                                 for parent_atom in arg_atoms:
5090                                                         parent, atom = parent_atom
5091                                                         self.digraph.add(existing_node, parent,
5092                                                                 priority=priority)
5093                                                         self._add_parent_atom(existing_node, parent_atom)
5094                                         # If a direct circular dependency is not an unsatisfied
5095                                         # buildtime dependency then drop it here since otherwise
5096                                         # it can skew the merge order calculation in an unwanted
5097                                         # way.
5098                                         if existing_node != myparent or \
5099                                                 (priority.buildtime and not priority.satisfied):
5100                                                 self.digraph.addnode(existing_node, myparent,
5101                                                         priority=priority)
5102                                                 if dep.atom is not None and dep.parent is not None:
5103                                                         self._add_parent_atom(existing_node,
5104                                                                 (dep.parent, dep.atom))
5105                                         return 1
5106                                 else:
5107
5108                                         # A slot collision has occurred.  Sometimes this coincides
5109                                         # with unresolvable blockers, so the slot collision will be
5110                                         # shown later if there are no unresolvable blockers.
5111                                         self._add_slot_conflict(pkg)
5112                                         slot_collision = True
5113
5114                         if slot_collision:
5115                                 # Now add this node to the graph so that self.display()
5116                                 # can show use flags and --tree output.  This node is
5117                                 # only being partially added to the graph.  It must not be
5118                                 # allowed to interfere with the other nodes that have been
5119                                 # added.  Do not overwrite data for existing nodes in
5120                                 # self.mydbapi since that data will be used for blocker
5121                                 # validation.
5122                                 # Even though the graph is now invalid, continue to process
5123                                 # dependencies so that things like --fetchonly can still
5124                                 # function despite collisions.
5125                                 pass
5126                         elif not previously_added:
5127                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5128                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5129                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5130
5131                         if not pkg.installed:
5132                                 # Allow this package to satisfy old-style virtuals in case it
5133                                 # doesn't already. Any pre-existing providers will be preferred
5134                                 # over this one.
5135                                 try:
5136                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5137                                         # For consistency, also update the global virtuals.
5138                                         settings = self.roots[pkg.root].settings
5139                                         settings.unlock()
5140                                         settings.setinst(pkg.cpv, pkg.metadata)
5141                                         settings.lock()
5142                                 except portage.exception.InvalidDependString, e:
5143                                         show_invalid_depstring_notice(
5144                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5145                                         del e
5146                                         return 0
5147
5148                 if arg_atoms:
5149                         self._set_nodes.add(pkg)
5150
5151                 # Do this even for --onlydeps packages so that the
5152                 # parent/child relationship is always known in case
5153                 # self._show_slot_collision_notice() needs to be called later.
5154                 self.digraph.add(pkg, myparent, priority=priority)
5155                 if dep.atom is not None and dep.parent is not None:
5156                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5157
5158                 if arg_atoms:
5159                         for parent_atom in arg_atoms:
5160                                 parent, atom = parent_atom
5161                                 self.digraph.add(pkg, parent, priority=priority)
5162                                 self._add_parent_atom(pkg, parent_atom)
5163
5164                 """ This section determines whether we go deeper into dependencies or not.
5165                     We want to go deeper on a few occasions:
5166                     Installing package A, we need to make sure package A's deps are met.
5167                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5168                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5169                 """
5170                 dep_stack = self._dep_stack
5171                 if "recurse" not in self.myparams:
5172                         return 1
5173                 elif pkg.installed and \
5174                         "deep" not in self.myparams:
5175                         dep_stack = self._ignored_deps
5176
5177                 self.spinner.update()
5178
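                     # Packages matched directly by command-line arguments are roots
                     # of the dependency tree, so their depth is reset to zero.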
5179                 if arg_atoms:
5180                         depth = 0
5181                 pkg.depth = depth
5182                 if not previously_added:
5183                         dep_stack.append(pkg)
5184                 return 1
5185
5186         def _add_parent_atom(self, pkg, parent_atom):
5187                 parent_atoms = self._parent_atoms.get(pkg)
5188                 if parent_atoms is None:
5189                         parent_atoms = set()
5190                         self._parent_atoms[pkg] = parent_atoms
5191                 parent_atoms.add(parent_atom)
5192
5193         def _add_slot_conflict(self, pkg):
5194                 self._slot_collision_nodes.add(pkg)
5195                 slot_key = (pkg.slot_atom, pkg.root)
5196                 slot_nodes = self._slot_collision_info.get(slot_key)
5197                 if slot_nodes is None:
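                             # First conflict seen for this slot: seed the set with the
                             # package instance that already occupies the slot in the graph.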
5198                         slot_nodes = set()
5199                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5200                         self._slot_collision_info[slot_key] = slot_nodes
5201                 slot_nodes.add(pkg)
5202
5203         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5204
5205                 mytype = pkg.type_name
5206                 myroot = pkg.root
5207                 mykey = pkg.cpv
5208                 metadata = pkg.metadata
5209                 myuse = pkg.use.enabled
5210                 jbigkey = pkg
5211                 depth = pkg.depth + 1
5212                 removal_action = "remove" in self.myparams
5213
5214                 edepend={}
5215                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5216                 for k in depkeys:
5217                         edepend[k] = metadata[k]
5218
5219                 if not pkg.built and \
5220                         "--buildpkgonly" in self.myopts and \
5221                         "deep" not in self.myparams and \
5222                         "empty" not in self.myparams:
5223                         edepend["RDEPEND"] = ""
5224                         edepend["PDEPEND"] = ""
5225                 bdeps_optional = False
5226
5227                 if pkg.built and not removal_action:
5228                         if self.myopts.get("--with-bdeps", "n") == "y":
5229                                 # Pull in build time deps as requested, but mark them as
5230                                 # "optional" since they are not strictly required. This allows
5231                                 # more freedom in the merge order calculation for solving
5232                                 # circular dependencies. Don't convert to PDEPEND since that
5233                                 # could make --with-bdeps=y less effective if it is used to
5234                                 # adjust merge order to prevent built_with_use() calls from
5235                                 # failing.
5236                                 bdeps_optional = True
5237                         else:
5238                                 # built packages do not have build time dependencies.
5239                                 edepend["DEPEND"] = ""
5240
5241                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5242                         edepend["DEPEND"] = ""
5243
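                     # Build-time deps (DEPEND) are resolved against the build host's
                     # root ("/"), while run-time deps (RDEPEND and PDEPEND) are
                     # resolved against the root that the package is merged to.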
5244                 deps = (
5245                         ("/", edepend["DEPEND"],
5246                                 self._priority(buildtime=(not bdeps_optional),
5247                                 optional=bdeps_optional)),
5248                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5249                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5250                 )
5251
5252                 debug = "--debug" in self.myopts
5253                 strict = mytype != "installed"
5254                 try:
5255                         for dep_root, dep_string, dep_priority in deps:
5256                                 if not dep_string:
5257                                         continue
5258                                 if debug:
5259                                         print
5260                                         print "Parent:   ", jbigkey
5261                                         print "Depstring:", dep_string
5262                                         print "Priority:", dep_priority
5263                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5264                                 try:
5265                                         selected_atoms = self._select_atoms(dep_root,
5266                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5267                                                 priority=dep_priority)
5268                                 except portage.exception.InvalidDependString, e:
5269                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5270                                         return 0
5271                                 if debug:
5272                                         print "Candidates:", selected_atoms
5273
5274                                 for atom in selected_atoms:
5275                                         try:
5276
5277                                                 atom = portage.dep.Atom(atom)
5278
5279                                                 mypriority = dep_priority.copy()
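                                                     # If an installed package already satisfies this atom,
                                                     # mark the dep as satisfied; such deps may be ignored or
                                                     # given less weight when the merge order is calculated.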
5280                                                 if not atom.blocker and vardb.match(atom):
5281                                                         mypriority.satisfied = True
5282
5283                                                 if not self._add_dep(Dependency(atom=atom,
5284                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5285                                                         priority=mypriority, root=dep_root),
5286                                                         allow_unsatisfied=allow_unsatisfied):
5287                                                         return 0
5288
5289                                         except portage.exception.InvalidAtom, e:
5290                                                 show_invalid_depstring_notice(
5291                                                         pkg, dep_string, str(e))
5292                                                 del e
5293                                                 if not pkg.installed:
5294                                                         return 0
5295
5296                                 if debug:
5297                                         print "Exiting...", jbigkey
5298                 except portage.exception.AmbiguousPackageName, e:
5299                         pkgs = e.args[0]
5300                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5301                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5302                         for cpv in pkgs:
5303                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5304                         portage.writemsg("\n", noiselevel=-1)
5305                         if mytype == "binary":
5306                                 portage.writemsg(
5307                                         "!!! This binary package cannot be installed: '%s'\n" % \
5308                                         mykey, noiselevel=-1)
5309                         elif mytype == "ebuild":
5310                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5311                                 myebuild, mylocation = portdb.findname2(mykey)
5312                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5313                                         "'%s'\n" % myebuild, noiselevel=-1)
5314                         portage.writemsg("!!! Please notify the package maintainer " + \
5315                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5316                         return 0
5317                 return 1
5318
5319         def _priority(self, **kwargs):
5320                 if "remove" in self.myparams:
5321                         priority_constructor = UnmergeDepPriority
5322                 else:
5323                         priority_constructor = DepPriority
5324                 return priority_constructor(**kwargs)
5325
5326         def _dep_expand(self, root_config, atom_without_category):
5327                 """
5328                 @param root_config: a root config instance
5329                 @type root_config: RootConfig
5330                 @param atom_without_category: an atom without a category component
5331                 @type atom_without_category: String
5332                 @rtype: list
5333                 @returns: a list of atoms containing categories (possibly empty)
5334                 """
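                     # Parse the atom with a temporary "null" category, then scan every
                     # configured database for categories that contain a matching
                     # package name.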
5335                 null_cp = portage.dep_getkey(insert_category_into_atom(
5336                         atom_without_category, "null"))
5337                 cat, atom_pn = portage.catsplit(null_cp)
5338
5339                 dbs = self._filtered_trees[root_config.root]["dbs"]
5340                 categories = set()
5341                 for db, pkg_type, built, installed, db_keys in dbs:
5342                         for cat in db.categories:
5343                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5344                                         categories.add(cat)
5345
5346                 deps = []
5347                 for cat in categories:
5348                         deps.append(insert_category_into_atom(
5349                                 atom_without_category, cat))
5350                 return deps
5351
5352         def _have_new_virt(self, root, atom_cp):
5353                 ret = False
5354                 for db, pkg_type, built, installed, db_keys in \
5355                         self._filtered_trees[root]["dbs"]:
5356                         if db.cp_list(atom_cp):
5357                                 ret = True
5358                                 break
5359                 return ret
5360
5361         def _iter_atoms_for_pkg(self, pkg):
5362                 # TODO: add multiple $ROOT support
5363                 if pkg.root != self.target_root:
5364                         return
5365                 atom_arg_map = self._atom_arg_map
5366                 root_config = self.roots[pkg.root]
5367                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5368                         atom_cp = portage.dep_getkey(atom)
5369                         if atom_cp != pkg.cp and \
5370                                 self._have_new_virt(pkg.root, atom_cp):
5371                                 continue
5372                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5373                         visible_pkgs.reverse() # descending order
5374                         higher_slot = None
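                             # If a visible package newer than pkg matches this atom in
                             # a different slot, attribute the argument to that slot
                             # instead and skip pkg.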
5375                         for visible_pkg in visible_pkgs:
5376                                 if visible_pkg.cp != atom_cp:
5377                                         continue
5378                                 if pkg >= visible_pkg:
5379                                         # This is descending order, and we're not
5380                                         # interested in any versions <= pkg given.
5381                                         break
5382                                 if pkg.slot_atom != visible_pkg.slot_atom:
5383                                         higher_slot = visible_pkg
5384                                         break
5385                         if higher_slot is not None:
5386                                 continue
5387                         for arg in atom_arg_map[(atom, pkg.root)]:
5388                                 if isinstance(arg, PackageArg) and \
5389                                         arg.package != pkg:
5390                                         continue
5391                                 yield arg, atom
5392
5393         def select_files(self, myfiles):
5394                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5395                 appropriate depgraph and return a favorite list."""
5396                 debug = "--debug" in self.myopts
5397                 root_config = self.roots[self.target_root]
5398                 sets = root_config.sets
5399                 getSetAtoms = root_config.setconfig.getSetAtoms
5400                 myfavorites=[]
5401                 myroot = self.target_root
5402                 dbs = self._filtered_trees[myroot]["dbs"]
5403                 vardb = self.trees[myroot]["vartree"].dbapi
5404                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5405                 portdb = self.trees[myroot]["porttree"].dbapi
5406                 bindb = self.trees[myroot]["bintree"].dbapi
5407                 pkgsettings = self.pkgsettings[myroot]
5408                 args = []
5409                 onlydeps = "--onlydeps" in self.myopts
5410                 lookup_owners = []
5411                 for x in myfiles:
5412                         ext = os.path.splitext(x)[1]
5413                         if ext==".tbz2":
5414                                 if not os.path.exists(x):
5415                                         if os.path.exists(
5416                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5417                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5418                                         elif os.path.exists(
5419                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5420                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5421                                         else:
5422                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5423                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5424                                                 return 0, myfavorites
5425                                 mytbz2=portage.xpak.tbz2(x)
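                                     # Read CATEGORY from the xpak metadata embedded in the
                                     # tbz2 in order to construct the full category/package-version key.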
5426                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5427                                 if os.path.realpath(x) != \
5428                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5429                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5430                                         return 0, myfavorites
5431                                 db_keys = list(bindb._aux_cache_keys)
5432                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5433                                 pkg = Package(type_name="binary", root_config=root_config,
5434                                         cpv=mykey, built=True, metadata=metadata,
5435                                         onlydeps=onlydeps)
5436                                 self._pkg_cache[pkg] = pkg
5437                                 args.append(PackageArg(arg=x, package=pkg,
5438                                         root_config=root_config))
5439                         elif ext==".ebuild":
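                                     # Derive the category/package name from the ebuild's
                                     # location in the tree:
                                     #   <tree_root>/<category>/<package>/<package>-<version>.ebuild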
5440                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5441                                 pkgdir = os.path.dirname(ebuild_path)
5442                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5443                                 cp = pkgdir[len(tree_root)+1:]
5444                                 e = portage.exception.PackageNotFound(
5445                                         ("%s is not in a valid portage tree " + \
5446                                         "hierarchy or does not exist") % x)
5447                                 if not portage.isvalidatom(cp):
5448                                         raise e
5449                                 cat = portage.catsplit(cp)[0]
5450                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5451                                 if not portage.isvalidatom("="+mykey):
5452                                         raise e
5453                                 ebuild_path = portdb.findname(mykey)
5454                                 if ebuild_path:
5455                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5456                                                 cp, os.path.basename(ebuild_path)):
5457                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5458                                                 return 0, myfavorites
5459                                         if mykey not in portdb.xmatch(
5460                                                 "match-visible", portage.dep_getkey(mykey)):
5461                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5462                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5463                                                 print colorize("BAD", "*** page for details.")
5464                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5465                                                         "Continuing...")
5466                                 else:
5467                                         raise portage.exception.PackageNotFound(
5468                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5469                                 db_keys = list(portdb._aux_cache_keys)
5470                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5471                                 pkg = Package(type_name="ebuild", root_config=root_config,
5472                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5473                                 pkgsettings.setcpv(pkg)
5474                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5475                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5476                                 self._pkg_cache[pkg] = pkg
5477                                 args.append(PackageArg(arg=x, package=pkg,
5478                                         root_config=root_config))
5479                         elif x.startswith(os.path.sep):
5480                                 if not x.startswith(myroot):
5481                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5482                                                 " $ROOT.\n") % x, noiselevel=-1)
5483                                         return 0, []
5484                                 # Queue these up since it's most efficient to handle
5485                                 # multiple files in a single iter_owners() call.
5486                                 lookup_owners.append(x)
5487                         else:
5488                                 if x in ("system", "world"):
5489                                         x = SETPREFIX + x
5490                                 if x.startswith(SETPREFIX):
5491                                         s = x[len(SETPREFIX):]
5492                                         if s not in sets:
5493                                                 raise portage.exception.PackageSetNotFound(s)
5494                                         if s in self._sets:
5495                                                 continue
5496                                         # Recursively expand sets so that containment tests in
5497                                         # self._get_parent_sets() properly match atoms in nested
5498                                         # sets (like if world contains system).
5499                                         expanded_set = InternalPackageSet(
5500                                                 initial_atoms=getSetAtoms(s))
5501                                         self._sets[s] = expanded_set
5502                                         args.append(SetArg(arg=x, set=expanded_set,
5503                                                 root_config=root_config))
5504                                         continue
5505                                 if not is_valid_package_atom(x):
5506                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5507                                                 noiselevel=-1)
5508                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5509                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5510                                         return (0,[])
5511                                 # Don't expand categories or old-style virtuals here unless
5512                                 # necessary. Expansion of old-style virtuals here causes at
5513                                 # least the following problems:
5514                                 #   1) It's more difficult to determine which set(s) an atom
5515                                 #      came from, if any.
5516                                 #   2) It takes away freedom from the resolver to choose other
5517                                 #      possible expansions when necessary.
5518                                 if "/" in x:
5519                                         args.append(AtomArg(arg=x, atom=x,
5520                                                 root_config=root_config))
5521                                         continue
5522                                 expanded_atoms = self._dep_expand(root_config, x)
5523                                 installed_cp_set = set()
5524                                 for atom in expanded_atoms:
5525                                         atom_cp = portage.dep_getkey(atom)
5526                                         if vardb.cp_list(atom_cp):
5527                                                 installed_cp_set.add(atom_cp)
5528                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5529                                         installed_cp = iter(installed_cp_set).next()
5530                                         expanded_atoms = [atom for atom in expanded_atoms \
5531                                                 if portage.dep_getkey(atom) == installed_cp]
5532
5533                                 if len(expanded_atoms) > 1:
5534                                         print
5535                                         print
5536                                         ambiguous_package_name(x, expanded_atoms, root_config,
5537                                                 self.spinner, self.myopts)
5538                                         return False, myfavorites
5539                                 if expanded_atoms:
5540                                         atom = expanded_atoms[0]
5541                                 else:
5542                                         null_atom = insert_category_into_atom(x, "null")
5543                                         null_cp = portage.dep_getkey(null_atom)
5544                                         cat, atom_pn = portage.catsplit(null_cp)
5545                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5546                                         if virts_p:
5547                                                 # Allow the depgraph to choose which virtual.
5548                                                 atom = insert_category_into_atom(x, "virtual")
5549                                         else:
5550                                                 atom = insert_category_into_atom(x, "null")
5551
5552                                 args.append(AtomArg(arg=x, atom=atom,
5553                                         root_config=root_config))
5554
5555                 if lookup_owners:
5556                         relative_paths = []
5557                         search_for_multiple = False
5558                         if len(lookup_owners) > 1:
5559                                 search_for_multiple = True
5560
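                             # Strip $ROOT from each path since the owner index stores
                             # paths relative to the root. Directory arguments can have
                             # multiple owners, so don't stop at the first match for them.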
5561                         for x in lookup_owners:
5562                                 if not search_for_multiple and os.path.isdir(x):
5563                                         search_for_multiple = True
5564                                 relative_paths.append(x[len(myroot):])
5565
5566                         owners = set()
5567                         for pkg, relative_path in \
5568                                 real_vardb._owners.iter_owners(relative_paths):
5569                                 owners.add(pkg.mycpv)
5570                                 if not search_for_multiple:
5571                                         break
5572
5573                         if not owners:
5574                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5575                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5576                                 return 0, []
5577
5578                         for cpv in owners:
5579                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5580                                 if not slot:
5581                                         # portage now masks packages with missing slot, but it's
5582                                         # possible that one was installed by an older version
5583                                         atom = portage.cpv_getkey(cpv)
5584                                 else:
5585                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5586                                 args.append(AtomArg(arg=atom, atom=atom,
5587                                         root_config=root_config))
5588
5589                 if "--update" in self.myopts:
5590                         # In some cases, the greedy slots behavior can pull in a slot that
5591                         # the user would want to uninstall due to it being blocked by a
5592                         # newer version in a different slot. Therefore, it's necessary to
5593                         # detect and discard any that should be uninstalled. Each time
5594                         # that arguments are updated, package selections are repeated in
5595                         # order to ensure consistency with the current arguments:
5596                         #
5597                         #  1) Initialize args
5598                         #  2) Select packages and generate initial greedy atoms
5599                         #  3) Update args with greedy atoms
5600                         #  4) Select packages and generate greedy atoms again, while
5601                         #     accounting for any blockers between selected packages
5602                         #  5) Update args with revised greedy atoms
5603
5604                         self._set_args(args)
5605                         greedy_args = []
5606                         for arg in args:
5607                                 greedy_args.append(arg)
5608                                 if not isinstance(arg, AtomArg):
5609                                         continue
5610                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5611                                         greedy_args.append(
5612                                                 AtomArg(arg=arg.arg, atom=atom,
5613                                                         root_config=arg.root_config))
5614
5615                         self._set_args(greedy_args)
5616                         del greedy_args
5617
5618                         # Revise greedy atoms, accounting for any blockers
5619                         # between selected packages.
5620                         revised_greedy_args = []
5621                         for arg in args:
5622                                 revised_greedy_args.append(arg)
5623                                 if not isinstance(arg, AtomArg):
5624                                         continue
5625                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5626                                         blocker_lookahead=True):
5627                                         revised_greedy_args.append(
5628                                                 AtomArg(arg=arg.arg, atom=atom,
5629                                                         root_config=arg.root_config))
5630                         args = revised_greedy_args
5631                         del revised_greedy_args
5632
5633                 self._set_args(args)
5634
5635                 myfavorites = set(myfavorites)
5636                 for arg in args:
5637                         if isinstance(arg, (AtomArg, PackageArg)):
5638                                 myfavorites.add(arg.atom)
5639                         elif isinstance(arg, SetArg):
5640                                 myfavorites.add(arg.arg)
5641                 myfavorites = list(myfavorites)
5642
5643                 pprovideddict = pkgsettings.pprovideddict
5644                 if debug:
5645                         portage.writemsg("\n", noiselevel=-1)
5646                 # Order needs to be preserved since a feature of --nodeps
5647                 # is to allow the user to force a specific merge order.
5648                 args.reverse()
5649                 while args:
5650                         arg = args.pop()
5651                         for atom in arg.set:
5652                                 self.spinner.update()
5653                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5654                                         root=myroot, parent=arg)
5655                                 atom_cp = portage.dep_getkey(atom)
5656                                 try:
5657                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5658                                         if pprovided and portage.match_from_list(atom, pprovided):
5659                                                 # A provided package has been specified on the command line.
5660                                                 self._pprovided_args.append((arg, atom))
5661                                                 continue
5662                                         if isinstance(arg, PackageArg):
5663                                                 if not self._add_pkg(arg.package, dep) or \
5664                                                         not self._create_graph():
5665                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5666                                                                 "dependencies for %s\n") % arg.arg)
5667                                                         return 0, myfavorites
5668                                                 continue
5669                                         if debug:
5670                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5671                                                         (arg, atom), noiselevel=-1)
5672                                         pkg, existing_node = self._select_package(
5673                                                 myroot, atom, onlydeps=onlydeps)
5674                                         if not pkg:
5675                                                 if not (isinstance(arg, SetArg) and \
5676                                                         arg.name in ("system", "world")):
5677                                                         self._unsatisfied_deps_for_display.append(
5678                                                                 ((myroot, atom), {}))
5679                                                         return 0, myfavorites
5680                                                 self._missing_args.append((arg, atom))
5681                                                 continue
5682                                         if atom_cp != pkg.cp:
5683                                                 # For old-style virtuals, we need to repeat the
5684                                                 # package.provided check against the selected package.
5685                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5686                                                 pprovided = pprovideddict.get(pkg.cp)
5687                                                 if pprovided and \
5688                                                         portage.match_from_list(expanded_atom, pprovided):
5689                                                         # A provided package has been
5690                                                         # specified on the command line.
5691                                                         self._pprovided_args.append((arg, atom))
5692                                                         continue
5693                                         if pkg.installed and "selective" not in self.myparams:
5694                                                 self._unsatisfied_deps_for_display.append(
5695                                                         ((myroot, atom), {}))
5696                                                 # Previous behavior was to bail out in this case, but
5697                                                 # since the dep is satisfied by the installed package,
5698                                                 # it's more friendly to continue building the graph
5699                                                 # and just show a warning message. Therefore, only bail
5700                                                 # out here if the atom is not from either the system or
5701                                                 # world set.
5702                                                 if not (isinstance(arg, SetArg) and \
5703                                                         arg.name in ("system", "world")):
5704                                                         return 0, myfavorites
5705
5706                                         # Add the selected package to the graph as soon as possible
5707                                         # so that later dep_check() calls can use it as feedback
5708                                         # for making more consistent atom selections.
5709                                         if not self._add_pkg(pkg, dep):
5710                                                 if isinstance(arg, SetArg):
5711                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5712                                                                 "dependencies for %s from %s\n") % \
5713                                                                 (atom, arg.arg))
5714                                                 else:
5715                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5716                                                                 "dependencies for %s\n") % atom)
5717                                                 return 0, myfavorites
5718
5719                                 except portage.exception.MissingSignature, e:
5720                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5721                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5722                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5723                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5724                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5725                                         return 0, myfavorites
5726                                 except portage.exception.InvalidSignature, e:
5727                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5728                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5729                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5730                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5731                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5732                                         return 0, myfavorites
5733                                 except SystemExit, e:
5734                                         raise # Re-raise so that SystemExit propagates and emerge can exit.
5735                                 except Exception, e:
5736                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5737                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5738                                         raise
5739
5740                 # Now that the root packages have been added to the graph,
5741                 # process the dependencies.
5742                 if not self._create_graph():
5743                         return 0, myfavorites
5744
5745                 missing=0
5746                 if "--usepkgonly" in self.myopts:
5747                         for xs in self.digraph.all_nodes():
5748                                 if not isinstance(xs, Package):
5749                                         continue
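                                     # Package objects index like their hash key, assumed here
                                     # to be (type_name, root, cpv, operation).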
5750                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5751                                         if missing == 0:
5752                                                 print
5753                                         missing += 1
5754                                         print "Missing binary for:",xs[2]
5755
5756                 try:
5757                         self.altlist()
5758                 except self._unknown_internal_error:
5759                         return False, myfavorites
5760
5761                 # The result is True unless binary packages are missing.
5762                 return (not missing,myfavorites)
5763
5764         def _set_args(self, args):
5765                 """
5766                 Create the "args" package set from atoms and packages given as
5767                 arguments. This method can be called multiple times if necessary.
5768                 The package selection cache is automatically invalidated, since
5769                 arguments influence package selections.
5770                 """
5771                 args_set = self._sets["args"]
5772                 args_set.clear()
5773                 for arg in args:
5774                         if not isinstance(arg, (AtomArg, PackageArg)):
5775                                 continue
5776                         atom = arg.atom
5777                         if atom in args_set:
5778                                 continue
5779                         args_set.add(atom)
5780
5781                 self._set_atoms.clear()
5782                 self._set_atoms.update(chain(*self._sets.itervalues()))
5783                 atom_arg_map = self._atom_arg_map
5784                 atom_arg_map.clear()
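                      # Build a mapping from (atom, root) to the list of argument
                      # objects that contributed that atom, so an atom can later be
                      # traced back to the argument(s) that supplied it.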
5785                 for arg in args:
5786                         for atom in arg.set:
5787                                 atom_key = (atom, arg.root_config.root)
5788                                 refs = atom_arg_map.get(atom_key)
5789                                 if refs is None:
5790                                         refs = []
5791                                         atom_arg_map[atom_key] = refs
5792                                 if arg not in refs:
5793                                         refs.append(arg)
5794
5795                 # Invalidate the package selection cache, since
5796                 # arguments influence package selections.
5797                 self._highest_pkg_cache.clear()
5798                 for trees in self._filtered_trees.itervalues():
5799                         trees["porttree"].dbapi._clear_cache()
5800
5801         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5802                 """
5803                 Return a list of slot atoms corresponding to installed slots that
5804                 differ from the slot of the highest visible match. When
5805                 blocker_lookahead is True, slot atoms that would trigger a blocker
5806                 conflict are automatically discarded, potentially allowing automatic
5807                 uninstallation of older slots when appropriate.
5808                 """
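                      # Hypothetical example: if sys-devel/gcc:4.3 is the highest
                      # visible match and sys-devel/gcc:4.1 is also installed, this
                      # returns [Atom("sys-devel/gcc:4.1")], unless blocker_lookahead
                      # discards it due to a blocker conflict.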
5809                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5810                 if highest_pkg is None:
5811                         return []
5812                 vardb = root_config.trees["vartree"].dbapi
5813                 slots = set()
5814                 for cpv in vardb.match(atom):
5815                         # don't mix new virtuals with old virtuals
5816                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5817                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5818
5819                 slots.add(highest_pkg.metadata["SLOT"])
5820                 if len(slots) == 1:
5821                         return []
5822                 greedy_pkgs = []
5823                 slots.remove(highest_pkg.metadata["SLOT"])
5824                 while slots:
5825                         slot = slots.pop()
5826                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5827                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5828                         if pkg is not None and \
5829                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5830                                 greedy_pkgs.append(pkg)
5831                 if not greedy_pkgs:
5832                         return []
5833                 if not blocker_lookahead:
5834                         return [pkg.slot_atom for pkg in greedy_pkgs]
5835
5836                 blockers = {}
5837                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5838                 for pkg in greedy_pkgs + [highest_pkg]:
5839                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5840                         try:
5841                                 atoms = self._select_atoms(
5842                                         pkg.root, dep_str, pkg.use.enabled,
5843                                         parent=pkg, strict=True)
5844                         except portage.exception.InvalidDependString:
5845                                 continue
5846                         blocker_atoms = (x for x in atoms if x.blocker)
5847                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5848
5849                 if highest_pkg not in blockers:
5850                         return []
5851
5852                 # filter packages with invalid deps
5853                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5854
5855                 # filter packages that conflict with highest_pkg
5856                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5857                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5858                         blockers[pkg].findAtomForPackage(highest_pkg))]
5859
5860                 if not greedy_pkgs:
5861                         return []
5862
5863                 # If two packages conflict, discard the lower version.
5864                 discard_pkgs = set()
5865                 greedy_pkgs.sort(reverse=True)
5866                 for i in xrange(len(greedy_pkgs) - 1):
5867                         pkg1 = greedy_pkgs[i]
5868                         if pkg1 in discard_pkgs:
5869                                 continue
5870                         for j in xrange(i + 1, len(greedy_pkgs)):
5871                                 pkg2 = greedy_pkgs[j]
5872                                 if pkg2 in discard_pkgs:
5873                                         continue
5874                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5875                                         blockers[pkg2].findAtomForPackage(pkg1):
5876                                         # pkg1 > pkg2
5877                                         discard_pkgs.add(pkg2)
5878
5879                 return [pkg.slot_atom for pkg in greedy_pkgs \
5880                         if pkg not in discard_pkgs]
5881
5882         def _select_atoms_from_graph(self, *pargs, **kwargs):
5883                 """
5884                 Prefer atoms matching packages that have already been
5885                 added to the graph or those that are installed and have
5886                 not been scheduled for replacement.
5887                 """
5888                 kwargs["trees"] = self._graph_trees
5889                 return self._select_atoms_highest_available(*pargs, **kwargs)
5890
5891         def _select_atoms_highest_available(self, root, depstring,
5892                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5893                 """This will raise InvalidDependString if necessary. If trees is
5894                 None then self._filtered_trees is used."""
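                      # dep_check() is expected to return a success flag followed by
                      # either the selected atoms or an error message; that is how
                      # mycheck is interpreted below.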
5895                 pkgsettings = self.pkgsettings[root]
5896                 if trees is None:
5897                         trees = self._filtered_trees
5898                 if not getattr(priority, "buildtime", False):
5899                         # The parent should only be passed to dep_check() for buildtime
5900                         # dependencies since that's the only case when it's appropriate
5901                         # to trigger the circular dependency avoidance code which uses it.
5902                         # It's important not to trigger the same circular dependency
5903                         # avoidance code for runtime dependencies since it's not needed
5904                         # and it can promote an incorrect package choice.
5905                         parent = None
5906                 if True:
5907                         try:
5908                                 if parent is not None:
5909                                         trees[root]["parent"] = parent
5910                                 if not strict:
5911                                         portage.dep._dep_check_strict = False
5912                                 mycheck = portage.dep_check(depstring, None,
5913                                         pkgsettings, myuse=myuse,
5914                                         myroot=root, trees=trees)
5915                         finally:
5916                                 if parent is not None:
5917                                         trees[root].pop("parent")
5918                                 portage.dep._dep_check_strict = True
5919                         if not mycheck[0]:
5920                                 raise portage.exception.InvalidDependString(mycheck[1])
5921                         selected_atoms = mycheck[1]
5922                 return selected_atoms
5923
5924         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5925                 atom = portage.dep.Atom(atom)
5926                 atom_set = InternalPackageSet(initial_atoms=(atom,))
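                      # Strip USE deps (keeping any slot) so that packages which would
                      # match after a USE change are still found and reported below.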
5927                 atom_without_use = atom
5928                 if atom.use:
5929                         atom_without_use = portage.dep.remove_slot(atom)
5930                         if atom.slot:
5931                                 atom_without_use += ":" + atom.slot
5932                         atom_without_use = portage.dep.Atom(atom_without_use)
5933                 xinfo = '"%s"' % atom
5934                 if arg:
5935                         xinfo='"%s"' % arg
5936                 # Discard null/ from failed cpv_expand category expansion.
5937                 xinfo = xinfo.replace("null/", "")
5938                 masked_packages = []
5939                 missing_use = []
5940                 masked_pkg_instances = set()
5941                 missing_licenses = []
5942                 have_eapi_mask = False
5943                 pkgsettings = self.pkgsettings[root]
5944                 implicit_iuse = pkgsettings._get_implicit_iuse()
5945                 root_config = self.roots[root]
5946                 portdb = self.roots[root].trees["porttree"].dbapi
5947                 dbs = self._filtered_trees[root]["dbs"]
5948                 for db, pkg_type, built, installed, db_keys in dbs:
5949                         if installed:
5950                                 continue
5952                         if hasattr(db, "xmatch"):
5953                                 cpv_list = db.xmatch("match-all", atom_without_use)
5954                         else:
5955                                 cpv_list = db.match(atom_without_use)
5956                         # descending order
5957                         cpv_list.reverse()
5958                         for cpv in cpv_list:
5959                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5960                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5961                                 if metadata is not None:
5962                                         pkg = Package(built=built, cpv=cpv,
5963                                                 installed=installed, metadata=metadata,
5964                                                 root_config=root_config)
5965                                         if pkg.cp != atom.cp:
5966                                                 # A cpv can be returned from dbapi.match() as an
5967                                                 # old-style virtual match even in cases when the
5968                                                 # package does not actually PROVIDE the virtual.
5969                                                 # Filter out any such false matches here.
5970                                                 if not atom_set.findAtomForPackage(pkg):
5971                                                         continue
5972                                         if mreasons:
5973                                                 masked_pkg_instances.add(pkg)
5974                                         if atom.use:
5975                                                 missing_use.append(pkg)
5976                                                 if not mreasons:
5977                                                         continue
5978                                 masked_packages.append(
5979                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5980
5981                 missing_use_reasons = []
5982                 missing_iuse_reasons = []
5983                 for pkg in missing_use:
5984                         use = pkg.use.enabled
5985                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5986                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5987                         missing_iuse = []
5988                         for x in atom.use.required:
5989                                 if iuse_re.match(x) is None:
5990                                         missing_iuse.append(x)
5991                         mreasons = []
5992                         if missing_iuse:
5993                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5994                                 missing_iuse_reasons.append((pkg, mreasons))
5995                         else:
5996                                 need_enable = sorted(atom.use.enabled.difference(use))
5997                                 need_disable = sorted(atom.use.disabled.intersection(use))
5998                                 if need_enable or need_disable:
5999                                         changes = []
6000                                         changes.extend(colorize("red", "+" + x) \
6001                                                 for x in need_enable)
6002                                         changes.extend(colorize("blue", "-" + x) \
6003                                                 for x in need_disable)
6004                                         mreasons.append("Change USE: %s" % " ".join(changes))
6005                                         missing_use_reasons.append((pkg, mreasons))
6006
6007                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6008                         in missing_use_reasons if pkg not in masked_pkg_instances]
6009
6010                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6011                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6012
6013                 show_missing_use = False
6014                 if unmasked_use_reasons:
6015                         # Only show the latest version.
6016                         show_missing_use = unmasked_use_reasons[:1]
6017                 elif unmasked_iuse_reasons:
6018                         if missing_use_reasons:
6019                                 # All packages with required IUSE are masked,
6020                                 # so display a normal masking message.
6021                                 pass
6022                         else:
6023                                 show_missing_use = unmasked_iuse_reasons
6024
6025                 if show_missing_use:
6026                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6027                         print "!!! One of the following packages is required to complete your request:"
6028                         for pkg, mreasons in show_missing_use:
6029                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6030
6031                 elif masked_packages:
6032                         print "\n!!! " + \
6033                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6034                                 colorize("INFORM", xinfo) + \
6035                                 colorize("BAD", " have been masked.")
6036                         print "!!! One of the following masked packages is required to complete your request:"
6037                         have_eapi_mask = show_masked_packages(masked_packages)
6038                         if have_eapi_mask:
6039                                 print
6040                                 msg = ("The current version of portage supports " + \
6041                                         "EAPI '%s'. You must upgrade to a newer version" + \
6042                                         " of portage before EAPI masked packages can" + \
6043                                         " be installed.") % portage.const.EAPI
6044                                 # textwrap is already imported at module level.
6045                                 for line in textwrap.wrap(msg, 75):
6046                                         print line
6047                         print
6048                         show_mask_docs()
6049                 else:
6050                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6051
6052                 # Show parent nodes and the argument that pulled them in.
6053                 traversed_nodes = set()
6054                 node = myparent
6055                 msg = []
6056                 while node is not None:
6057                         traversed_nodes.add(node)
6058                         msg.append('(dependency required by "%s" [%s])' % \
6059                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6060                         # When traversing to parents, prefer arguments over packages
6061                         # since arguments are root nodes. Never traverse the same
6062                         # package twice, in order to prevent an infinite loop.
6063                         selected_parent = None
6064                         for parent in self.digraph.parent_nodes(node):
6065                                 if isinstance(parent, DependencyArg):
6066                                         msg.append('(dependency required by "%s" [argument])' % \
6067                                                 (colorize('INFORM', str(parent))))
6068                                         selected_parent = None
6069                                         break
6070                                 if parent not in traversed_nodes:
6071                                         selected_parent = parent
6072                         node = selected_parent
6073                 for line in msg:
6074                         print line
6075
6076                 print
6077
6078         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6079                 cache_key = (root, atom, onlydeps)
6080                 ret = self._highest_pkg_cache.get(cache_key)
6081                 if ret is not None:
6082                         pkg, existing = ret
6083                         if pkg and not existing:
6084                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6085                                 if existing and existing == pkg:
6086                                         # Update the cache to reflect that the
6087                                         # package has been added to the graph.
6088                                         ret = pkg, pkg
6089                                         self._highest_pkg_cache[cache_key] = ret
6090                         return ret
6091                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6092                 self._highest_pkg_cache[cache_key] = ret
6093                 pkg, existing = ret
6094                 if pkg is not None:
6095                         settings = pkg.root_config.settings
6096                         if visible(settings, pkg) and not (pkg.installed and \
6097                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6098                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6099                 return ret
6100
6101         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6102                 root_config = self.roots[root]
6103                 pkgsettings = self.pkgsettings[root]
6104                 dbs = self._filtered_trees[root]["dbs"]
6105                 vardb = self.roots[root].trees["vartree"].dbapi
6106                 portdb = self.roots[root].trees["porttree"].dbapi
6107                 # List of acceptable packages, ordered by type preference.
6108                 matched_packages = []
6109                 highest_version = None
6110                 if not isinstance(atom, portage.dep.Atom):
6111                         atom = portage.dep.Atom(atom)
6112                 atom_cp = atom.cp
6113                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6114                 existing_node = None
6115                 myeb = None
6116                 usepkgonly = "--usepkgonly" in self.myopts
6117                 empty = "empty" in self.myparams
6118                 selective = "selective" in self.myparams
6119                 reinstall = False
6120                 noreplace = "--noreplace" in self.myopts
6121                 # Behavior of the "selective" parameter depends on
6122                 # whether or not a package matches an argument atom.
6123                 # If an installed package provides an old-style
6124                 # virtual that is no longer provided by an available
6125                 # package, the installed package may match an argument
6126                 # atom even though none of the available packages do.
6127                 # Therefore, "selective" logic does not consider
6128                 # whether or not an installed package matches an
6129                 # argument atom. It only considers whether or not
6130                 # available packages match argument atoms, which is
6131                 # represented by the found_available_arg flag.
6132                 found_available_arg = False
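                      # Two passes: the first prefers a package that already occupies
                      # the matching slot in the graph (an existing node); the second
                      # falls back to selecting a new match from the available
                      # databases.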
6133                 for find_existing_node in True, False:
6134                         if existing_node:
6135                                 break
6136                         for db, pkg_type, built, installed, db_keys in dbs:
6137                                 if existing_node:
6138                                         break
6139                                 if installed and not find_existing_node:
6140                                         want_reinstall = reinstall or empty or \
6141                                                 (found_available_arg and not selective)
6142                                         if want_reinstall and matched_packages:
6143                                                 continue
6144                                 if hasattr(db, "xmatch"):
6145                                         cpv_list = db.xmatch("match-all", atom)
6146                                 else:
6147                                         cpv_list = db.match(atom)
6148
6149                                 # USE=multislot can make an installed package appear as if
6150                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6151                                 # won't do any good as long as USE=multislot is enabled since
6152                                 # the newly built package still won't have the expected slot.
6153                                 # Therefore, assume that such SLOT dependencies are already
6154                                 # satisfied rather than forcing a rebuild.
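                                     # In that case, fall back to matching the installed package
                                     # against the atom with its slot removed (USE deps preserved).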
6155                                 if installed and not cpv_list and atom.slot:
6156                                         for cpv in db.match(atom.cp):
6157                                                 slot_available = False
6158                                                 for other_db, other_type, other_built, \
6159                                                         other_installed, other_keys in dbs:
6160                                                         try:
6161                                                                 if atom.slot == \
6162                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6163                                                                         slot_available = True
6164                                                                         break
6165                                                         except KeyError:
6166                                                                 pass
6167                                                 if not slot_available:
6168                                                         continue
6169                                                 inst_pkg = self._pkg(cpv, "installed",
6170                                                         root_config, installed=installed)
6171                                                 # Remove the slot from the atom and verify that
6172                                                 # the package matches the resulting atom.
6173                                                 atom_without_slot = portage.dep.remove_slot(atom)
6174                                                 if atom.use:
6175                                                         atom_without_slot += str(atom.use)
6176                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6177                                                 if portage.match_from_list(
6178                                                         atom_without_slot, [inst_pkg]):
6179                                                         cpv_list = [inst_pkg.cpv]
6180                                                 break
6181
6182                                 if not cpv_list:
6183                                         continue
6184                                 pkg_status = "merge"
6185                                 if installed or onlydeps:
6186                                         pkg_status = "nomerge"
6187                                 # descending order
6188                                 cpv_list.reverse()
6189                                 for cpv in cpv_list:
6190                                         # Make --noreplace take precedence over --newuse.
6191                                         if not installed and noreplace and \
6192                                                 cpv in vardb.match(atom):
6193                                                 # If the installed version is masked, it may
6194                                                 # be necessary to look at lower versions,
6195                                                 # in case there is a visible downgrade.
6196                                                 continue
6197                                         reinstall_for_flags = None
6198                                         cache_key = (pkg_type, root, cpv, pkg_status)
6199                                         calculated_use = True
6200                                         pkg = self._pkg_cache.get(cache_key)
6201                                         if pkg is None:
6202                                                 calculated_use = False
6203                                                 try:
6204                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6205                                                 except KeyError:
6206                                                         continue
6207                                                 pkg = Package(built=built, cpv=cpv,
6208                                                         installed=installed, metadata=metadata,
6209                                                         onlydeps=onlydeps, root_config=root_config,
6210                                                         type_name=pkg_type)
6211                                                 metadata = pkg.metadata
6212                                                 if not built:
6213                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6214                                                 if not built and ("?" in metadata["LICENSE"] or \
6215                                                         "?" in metadata["PROVIDE"]):
6216                                                         # This is avoided whenever possible because
6217                                                         # it's expensive. It only needs to be done here
6218                                                         # if it has an effect on visibility.
6219                                                         pkgsettings.setcpv(pkg)
6220                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6221                                                         calculated_use = True
6222                                                 self._pkg_cache[pkg] = pkg
6223
6224                                         if not installed or (built and matched_packages):
6225                                                 # Only enforce visibility on installed packages
6226                                                 # if there is at least one other visible package
6227                                                 # available. By filtering installed masked packages
6228                                                 # here, packages that have been masked since they
6229                                                 # were installed can be automatically downgraded
6230                                                 # to an unmasked version.
6231                                                 try:
6232                                                         if not visible(pkgsettings, pkg):
6233                                                                 continue
6234                                                 except portage.exception.InvalidDependString:
6235                                                         if not installed:
6236                                                                 continue
6237
6238                                                 # Enable upgrade or downgrade to a version
6239                                                 # with visible KEYWORDS when the installed
6240                                                 # version is masked by KEYWORDS, but never
6241                                                 # reinstall the same exact version only due
6242                                                 # to a KEYWORDS mask.
6243                                                 if built and matched_packages:
6244
6245                                                         different_version = None
6246                                                         for avail_pkg in matched_packages:
6247                                                                 if not portage.dep.cpvequal(
6248                                                                         pkg.cpv, avail_pkg.cpv):
6249                                                                         different_version = avail_pkg
6250                                                                         break
6251                                                         if different_version is not None:
6252
6253                                                                 if installed and \
6254                                                                         pkgsettings._getMissingKeywords(
6255                                                                         pkg.cpv, pkg.metadata):
6256                                                                         continue
6257
6258                                                                 # If the ebuild no longer exists or its
6259                                                                 # keywords have been dropped, reject built
6260                                                                 # instances (installed or binary).
6261                                                                 # If --usepkgonly is enabled, assume that
6262                                                                 # the ebuild status should be ignored.
6263                                                                 if not usepkgonly:
6264                                                                         try:
6265                                                                                 pkg_eb = self._pkg(
6266                                                                                         pkg.cpv, "ebuild", root_config)
6267                                                                         except portage.exception.PackageNotFound:
6268                                                                                 continue
6269                                                                         else:
6270                                                                                 if not visible(pkgsettings, pkg_eb):
6271                                                                                         continue
6272
6273                                         if not pkg.built and not calculated_use:
6274                                                 # This is avoided whenever possible because
6275                                                 # it's expensive.
6276                                                 pkgsettings.setcpv(pkg)
6277                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6278
6279                                         if pkg.cp != atom.cp:
6280                                                 # A cpv can be returned from dbapi.match() as an
6281                                                 # old-style virtual match even in cases when the
6282                                                 # package does not actually PROVIDE the virtual.
6283                                                 # Filter out any such false matches here.
6284                                                 if not atom_set.findAtomForPackage(pkg):
6285                                                         continue
6286
6287                                         myarg = None
6288                                         if root == self.target_root:
6289                                                 try:
6290                                                         # Ebuild USE must have been calculated prior
6291                                                         # to this point, in case atoms have USE deps.
6292                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6293                                                 except StopIteration:
6294                                                         pass
6295                                                 except portage.exception.InvalidDependString:
6296                                                         if not installed:
6297                                                                 # masked by corruption
6298                                                                 continue
6299                                         if not installed and myarg:
6300                                                 found_available_arg = True
6301
6302                                         if atom.use and not pkg.built:
6303                                                 use = pkg.use.enabled
6304                                                 if atom.use.enabled.difference(use):
6305                                                         continue
6306                                                 if atom.use.disabled.intersection(use):
6307                                                         continue
6308                                         if pkg.cp == atom_cp:
6309                                                 if highest_version is None:
6310                                                         highest_version = pkg
6311                                                 elif pkg > highest_version:
6312                                                         highest_version = pkg
6313                                         # At this point, we've found the highest visible
6314                                         # match from the current repo. Any lower versions
6315                                         # from this repo are ignored, so the loop
6316                                         # will always end with a break statement below
6317                                         # this point.
6318                                         if find_existing_node:
6319                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6320                                                 if not e_pkg:
6321                                                         break
6322                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6323                                                         if highest_version and \
6324                                                                 e_pkg.cp == atom_cp and \
6325                                                                 e_pkg < highest_version and \
6326                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6327                                                                 # There is a higher version available in a
6328                                                                 # different slot, so this existing node is
6329                                                                 # irrelevant.
6330                                                                 pass
6331                                                         else:
6332                                                                 matched_packages.append(e_pkg)
6333                                                                 existing_node = e_pkg
6334                                                 break
6335                                         # Compare built package to current config and
6336                                         # reject the built package if necessary.
6337                                         if built and not installed and \
6338                                                 ("--newuse" in self.myopts or \
6339                                                 "--reinstall" in self.myopts):
6340                                                 iuses = pkg.iuse.all
6341                                                 old_use = pkg.use.enabled
6342                                                 if myeb:
6343                                                         pkgsettings.setcpv(myeb)
6344                                                 else:
6345                                                         pkgsettings.setcpv(pkg)
6346                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6347                                                 forced_flags = set()
6348                                                 forced_flags.update(pkgsettings.useforce)
6349                                                 forced_flags.update(pkgsettings.usemask)
6350                                                 cur_iuse = iuses
6351                                                 if myeb and not usepkgonly:
6352                                                         cur_iuse = myeb.iuse.all
6353                                                 if self._reinstall_for_flags(forced_flags,
6354                                                         old_use, iuses,
6355                                                         now_use, cur_iuse):
6356                                                         break
6357                                         # Compare current config to installed package
6358                                         # and do not reinstall if possible.
6359                                         if not installed and \
6360                                                 ("--newuse" in self.myopts or \
6361                                                 "--reinstall" in self.myopts) and \
6362                                                 cpv in vardb.match(atom):
6363                                                 pkgsettings.setcpv(pkg)
6364                                                 forced_flags = set()
6365                                                 forced_flags.update(pkgsettings.useforce)
6366                                                 forced_flags.update(pkgsettings.usemask)
6367                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6368                                                 old_iuse = set(filter_iuse_defaults(
6369                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6370                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6371                                                 cur_iuse = pkg.iuse.all
6372                                                 reinstall_for_flags = \
6373                                                         self._reinstall_for_flags(
6374                                                         forced_flags, old_use, old_iuse,
6375                                                         cur_use, cur_iuse)
6376                                                 if reinstall_for_flags:
6377                                                         reinstall = True
6378                                         if not built:
6379                                                 myeb = pkg
6380                                         matched_packages.append(pkg)
6381                                         if reinstall_for_flags:
6382                                                 self._reinstall_nodes[pkg] = \
6383                                                         reinstall_for_flags
6384                                         break
6385
6386                 if not matched_packages:
6387                         return None, None
6388
6389                 if "--debug" in self.myopts:
6390                         for pkg in matched_packages:
6391                                 portage.writemsg("%s %s\n" % \
6392                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6393
6394                 # Filter out any old-style virtual matches if they are
6395                 # mixed with new-style virtual matches.
6396                 cp = portage.dep_getkey(atom)
6397                 if len(matched_packages) > 1 and \
6398                         "virtual" == portage.catsplit(cp)[0]:
6399                         for pkg in matched_packages:
6400                                 if pkg.cp != cp:
6401                                         continue
6402                                 # Got a new-style virtual, so filter
6403                                 # out any old-style virtuals.
6404                                 matched_packages = [pkg for pkg in matched_packages \
6405                                         if pkg.cp == cp]
6406                                 break
6407
6408                 if len(matched_packages) > 1:
6409                         bestmatch = portage.best(
6410                                 [pkg.cpv for pkg in matched_packages])
6411                         matched_packages = [pkg for pkg in matched_packages \
6412                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6413
6414                 # ordered by type preference ("ebuild" type is the last resort)
6415                 return  matched_packages[-1], existing_node
6416
6417         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6418                 """
6419                 Select packages that have already been added to the graph or
6420                 those that are installed and have not been scheduled for
6421                 replacement.
6422                 """
6423                 graph_db = self._graph_trees[root]["porttree"].dbapi
6424                 matches = graph_db.match_pkgs(atom)
6425                 if not matches:
6426                         return None, None
6427                 pkg = matches[-1] # highest match
6428                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6429                 return pkg, in_graph
6430
6431         def _complete_graph(self):
6432                 """
6433                 Add any deep dependencies of required sets (args, system, world) that
6434                 have not been pulled into the graph yet. This ensures that the graph
6435                 is consistent such that initially satisfied deep dependencies are not
6436                 broken in the new graph. Initially unsatisfied dependencies are
6437                 irrelevant since we only want to avoid breaking dependencies that are
6438                 initially satisfied.
6439
6440                 Since this method can consume enough time to disturb users, it is
6441                 currently only enabled by the --complete-graph option.
6442                 """
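                     # For example (hypothetical packages): if the graph upgrades
                     # dev-libs/foo to a version that no longer satisfies an
                     # installed dev-util/bar's RDEPEND on '<dev-libs/foo-2',
                     # the traversal below pulls bar back in so the breakage is
                     # reported as a slot conflict rather than going unnoticed.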
6443                 if "--buildpkgonly" in self.myopts or \
6444                         "recurse" not in self.myparams:
6445                         return 1
6446
6447                 if "complete" not in self.myparams:
6448                         # Skip this to avoid consuming enough time to disturb users.
6449                         return 1
6450
6451                 # Put the depgraph into a mode that causes it to only
6452                 # select packages that have already been added to the
6453                 # graph or those that are installed and have not been
6454                 # scheduled for replacement. Also, toggle the "deep"
6455                 # parameter so that all dependencies are traversed and
6456                 # accounted for.
6457                 self._select_atoms = self._select_atoms_from_graph
6458                 self._select_package = self._select_pkg_from_graph
6459                 already_deep = "deep" in self.myparams
6460                 if not already_deep:
6461                         self.myparams.add("deep")
6462
6463                 for root in self.roots:
6464                         required_set_names = self._required_set_names.copy()
6465                         if root == self.target_root and \
6466                                 (already_deep or "empty" in self.myparams):
6467                                 required_set_names.difference_update(self._sets)
6468                         if not required_set_names and not self._ignored_deps:
6469                                 continue
6470                         root_config = self.roots[root]
6471                         setconfig = root_config.setconfig
6472                         args = []
6473                         # Reuse existing SetArg instances when available.
6474                         for arg in self.digraph.root_nodes():
6475                                 if not isinstance(arg, SetArg):
6476                                         continue
6477                                 if arg.root_config != root_config:
6478                                         continue
6479                                 if arg.name in required_set_names:
6480                                         args.append(arg)
6481                                         required_set_names.remove(arg.name)
6482                         # Create new SetArg instances only when necessary.
6483                         for s in required_set_names:
6484                                 expanded_set = InternalPackageSet(
6485                                         initial_atoms=setconfig.getSetAtoms(s))
6486                                 atom = SETPREFIX + s
6487                                 args.append(SetArg(arg=atom, set=expanded_set,
6488                                         root_config=root_config))
6489                         vardb = root_config.trees["vartree"].dbapi
6490                         for arg in args:
6491                                 for atom in arg.set:
6492                                         self._dep_stack.append(
6493                                                 Dependency(atom=atom, root=root, parent=arg))
6494                         if self._ignored_deps:
6495                                 self._dep_stack.extend(self._ignored_deps)
6496                                 self._ignored_deps = []
6497                         if not self._create_graph(allow_unsatisfied=True):
6498                                 return 0
6499                         # Check the unsatisfied deps to see if any initially satisfied deps
6500                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6501                         # deps are irrelevant since we only want to avoid breaking deps
6502                         # that are initially satisfied.
6503                         while self._unsatisfied_deps:
6504                                 dep = self._unsatisfied_deps.pop()
6505                                 matches = vardb.match_pkgs(dep.atom)
6506                                 if not matches:
6507                                         self._initially_unsatisfied_deps.append(dep)
6508                                         continue
6509                                 # A scheduled installation broke a deep dependency.
6510                                 # Add the installed package to the graph so that it
6511                                 # will be appropriately reported as a slot collision
6512                                 # (possibly solvable via backtracking).
6513                                 pkg = matches[-1] # highest match
6514                                 if not self._add_pkg(pkg, dep):
6515                                         return 0
6516                                 if not self._create_graph(allow_unsatisfied=True):
6517                                         return 0
6518                 return 1
6519
6520         def _pkg(self, cpv, type_name, root_config, installed=False):
6521                 """
6522                 Get a package instance from the cache, or create a new
6523                 one if necessary. Raises PackageNotFound if aux_get
6524                 fails for some reason (package does not exist or is
6525                 corrupt).
6526                 """
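                     # The cache key matches the get() call below: a
                     # (type_name, root, cpv, operation) tuple, e.g. a
                     # hypothetical ("installed", "/", "sys-apps/sed-4.1.5",
                     # "nomerge"), so repeated lookups return the same
                     # Package instance.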
6527                 operation = "merge"
6528                 if installed:
6529                         operation = "nomerge"
6530                 pkg = self._pkg_cache.get(
6531                         (type_name, root_config.root, cpv, operation))
6532                 if pkg is None:
6533                         tree_type = self.pkg_tree_map[type_name]
6534                         db = root_config.trees[tree_type].dbapi
6535                         db_keys = list(self._trees_orig[root_config.root][
6536                                 tree_type].dbapi._aux_cache_keys)
6537                         try:
6538                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6539                         except KeyError:
6540                                 raise portage.exception.PackageNotFound(cpv)
6541                         pkg = Package(cpv=cpv, metadata=metadata,
6542                                 root_config=root_config, installed=installed)
6543                         if type_name == "ebuild":
6544                                 settings = self.pkgsettings[root_config.root]
6545                                 settings.setcpv(pkg)
6546                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6547                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6548                         self._pkg_cache[pkg] = pkg
6549                 return pkg
6550
6551         def validate_blockers(self):
6552                 """Remove any blockers from the digraph that do not match any of the
6553                 packages within the graph.  If necessary, create hard deps to ensure
6554                 correct merge order such that mutually blocking packages are never
6555                 installed simultaneously."""
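                     # Roughly two phases: first, collect blocker atoms for every
                     # installed package (using BlockerCache to avoid repeated
                     # dep_check calls); second, match each blocker against the
                     # initial (vartree) and final (graph) databases and classify
                     # it as irrelevant, resolvable via an uninstall task, or
                     # unsolvable.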
6556
6557                 if "--buildpkgonly" in self.myopts or \
6558                         "--nodeps" in self.myopts:
6559                         return True
6560
6561                 #if "deep" in self.myparams:
6562                 if True:
6563                         # Pull in blockers from all installed packages that haven't
6564                         # already been pulled into the depgraph. The additional
6565                         # dep_check calls that this requires incur a performance
6566                         # penalty, but the check is currently always enabled.
6567
6568                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6569                         for myroot in self.trees:
6570                                 vardb = self.trees[myroot]["vartree"].dbapi
6571                                 portdb = self.trees[myroot]["porttree"].dbapi
6572                                 pkgsettings = self.pkgsettings[myroot]
6573                                 final_db = self.mydbapi[myroot]
6574
6575                                 blocker_cache = BlockerCache(myroot, vardb)
6576                                 stale_cache = set(blocker_cache)
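                                     # blocker_cache maps each installed cpv to a BlockerData
                                     # (counter, atoms) pair; a hypothetical entry might be
                                     #   blocker_cache["app-misc/foo-1.0"] ->
                                     #       BlockerData(12345, ["!app-misc/foo-compat"])
                                     # Entries whose COUNTER no longer matches are rebuilt below,
                                     # and anything left in stale_cache afterwards is discarded.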
6577                                 for pkg in vardb:
6578                                         cpv = pkg.cpv
6579                                         stale_cache.discard(cpv)
6580                                         pkg_in_graph = self.digraph.contains(pkg)
6581
6582                                         # Check for masked installed packages. Only warn about
6583                                         # packages that are in the graph in order to avoid warning
6584                                         # about those that will be automatically uninstalled during
6585                                         # the merge process or by --depclean.
6586                                         if pkg in final_db:
6587                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6588                                                         self._masked_installed.add(pkg)
6589
6590                                         blocker_atoms = None
6591                                         blockers = None
6592                                         if pkg_in_graph:
6593                                                 blockers = []
6594                                                 try:
6595                                                         blockers.extend(
6596                                                                 self._blocker_parents.child_nodes(pkg))
6597                                                 except KeyError:
6598                                                         pass
6599                                                 try:
6600                                                         blockers.extend(
6601                                                                 self._irrelevant_blockers.child_nodes(pkg))
6602                                                 except KeyError:
6603                                                         pass
6604                                         if blockers is not None:
6605                                                 blockers = set(str(blocker.atom) \
6606                                                         for blocker in blockers)
6607
6608                                         # Look up this package's blockers, preferring cached
6609                                         # data as long as the COUNTER still matches.
6610                                         self.spinner.update()
6611                                         blocker_data = blocker_cache.get(cpv)
6612                                         if blocker_data is not None and \
6613                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6614                                                 blocker_data = None
6615
6616                                         # If blocker data from the graph is available, use
6617                                         # it to validate the cache and update the cache if
6618                                         # it seems invalid.
6619                                         if blocker_data is not None and \
6620                                                 blockers is not None:
6621                                                 if not blockers.symmetric_difference(
6622                                                         blocker_data.atoms):
6623                                                         continue
6624                                                 blocker_data = None
6625
6626                                         if blocker_data is None and \
6627                                                 blockers is not None:
6628                                                 # Re-use the blockers from the graph.
6629                                                 blocker_atoms = sorted(blockers)
6630                                                 counter = long(pkg.metadata["COUNTER"])
6631                                                 blocker_data = \
6632                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6633                                                 blocker_cache[pkg.cpv] = blocker_data
6634                                                 continue
6635
6636                                         if blocker_data:
6637                                                 blocker_atoms = blocker_data.atoms
6638                                         else:
6639                                                 # Use aux_get() to trigger FakeVartree global
6640                                                 # updates on *DEPEND when appropriate.
6641                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6642                                                 # It is crucial to pass in final_db here in order to
6643                                                 # optimize dep_check calls by eliminating atoms via
6644                                                 # dep_wordreduce and dep_eval calls.
6645                                                 try:
6646                                                         portage.dep._dep_check_strict = False
6647                                                         try:
6648                                                                 success, atoms = portage.dep_check(depstr,
6649                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6650                                                                         trees=self._graph_trees, myroot=myroot)
6651                                                         except Exception, e:
6652                                                                 if isinstance(e, SystemExit):
6653                                                                         raise
6654                                                                 # This is helpful, for example, if a ValueError
6655                                                                 # is thrown from cpv_expand due to multiple
6656                                                                 # matches (this can happen if an atom lacks a
6657                                                                 # category).
6658                                                                 show_invalid_depstring_notice(
6659                                                                         pkg, depstr, str(e))
6660                                                                 del e
6661                                                                 raise
6662                                                 finally:
6663                                                         portage.dep._dep_check_strict = True
6664                                                 if not success:
6665                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6666                                                         if replacement_pkg and \
6667                                                                 replacement_pkg[0].operation == "merge":
6668                                                                 # This package is being replaced anyway, so
6669                                                                 # ignore invalid dependencies so as not to
6670                                                                 # annoy the user too much (otherwise they'd be
6671                                                                 # forced to manually unmerge it first).
6672                                                                 continue
6673                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6674                                                         return False
6675                                                 blocker_atoms = [myatom for myatom in atoms \
6676                                                         if myatom.startswith("!")]
6677                                                 blocker_atoms.sort()
6678                                                 counter = long(pkg.metadata["COUNTER"])
6679                                                 blocker_cache[cpv] = \
6680                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6681                                         if blocker_atoms:
6682                                                 try:
6683                                                         for atom in blocker_atoms:
6684                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6685                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6686                                                                 self._blocker_parents.add(blocker, pkg)
6687                                                 except portage.exception.InvalidAtom, e:
6688                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6689                                                         show_invalid_depstring_notice(
6690                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6691                                                         return False
6692                                 for cpv in stale_cache:
6693                                         del blocker_cache[cpv]
6694                                 blocker_cache.flush()
6695                                 del blocker_cache
6696
6697                 # Discard any "uninstall" tasks scheduled by previous calls
6698                 # to this method, since those tasks may not make sense given
6699                 # the current graph state.
6700                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6701                 if previous_uninstall_tasks:
6702                         self._blocker_uninstalls = digraph()
6703                         self.digraph.difference_update(previous_uninstall_tasks)
6704
6705                 for blocker in self._blocker_parents.leaf_nodes():
6706                         self.spinner.update()
6707                         root_config = self.roots[blocker.root]
6708                         virtuals = root_config.settings.getvirtuals()
6709                         myroot = blocker.root
6710                         initial_db = self.trees[myroot]["vartree"].dbapi
6711                         final_db = self.mydbapi[myroot]
6712
6713                         provider_virtual = False
6714                         if blocker.cp in virtuals and \
6715                                 not self._have_new_virt(blocker.root, blocker.cp):
6716                                 provider_virtual = True
6717
6718                         if provider_virtual:
6719                                 atoms = []
6720                                 for provider_entry in virtuals[blocker.cp]:
6721                                         provider_cp = \
6722                                                 portage.dep_getkey(provider_entry)
6723                                         atoms.append(blocker.atom.replace(
6724                                                 blocker.cp, provider_cp))
6725                         else:
6726                                 atoms = [blocker.atom]
6727
6728                         blocked_initial = []
6729                         for atom in atoms:
6730                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6731
6732                         blocked_final = []
6733                         for atom in atoms:
6734                                 blocked_final.extend(final_db.match_pkgs(atom))
6735
6736                         if not blocked_initial and not blocked_final:
6737                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6738                                 self._blocker_parents.remove(blocker)
6739                                 # Discard any parents that don't have any more blockers.
6740                                 for pkg in parent_pkgs:
6741                                         self._irrelevant_blockers.add(blocker, pkg)
6742                                         if not self._blocker_parents.child_nodes(pkg):
6743                                                 self._blocker_parents.remove(pkg)
6744                                 continue
6745                         for parent in self._blocker_parents.parent_nodes(blocker):
6746                                 unresolved_blocks = False
6747                                 depends_on_order = set()
6748                                 for pkg in blocked_initial:
6749                                         if pkg.slot_atom == parent.slot_atom:
6750                                                 # TODO: Support blocks within slots in cases where it
6751                                                 # might make sense.  For example, a new version might
6752                                                 # require that the old version be uninstalled at build
6753                                                 # time.
6754                                                 continue
6755                                         if parent.installed:
6756                                                 # Two currently installed packages conflict with
6757                                                 # each other. Ignore this case since the damage
6758                                                 # is already done and this would be likely to
6759                                                 # confuse users if displayed like a normal blocker.
6760                                                 continue
6761
6762                                         self._blocked_pkgs.add(pkg, blocker)
6763
6764                                         if parent.operation == "merge":
6765                                                 # Maybe the blocked package can be replaced or simply
6766                                                 # unmerged to resolve this block.
6767                                                 depends_on_order.add((pkg, parent))
6768                                                 continue
6769                                         # None of the above blocker resolution techniques apply,
6770                                         # so apparently this one is unresolvable.
6771                                         unresolved_blocks = True
6772                                 for pkg in blocked_final:
6773                                         if pkg.slot_atom == parent.slot_atom:
6774                                                 # TODO: Support blocks within slots.
6775                                                 continue
6776                                         if parent.operation == "nomerge" and \
6777                                                 pkg.operation == "nomerge":
6778                                                 # This blocker will be handled the next time that a
6779                                                 # merge of either package is triggered.
6780                                                 continue
6781
6782                                         self._blocked_pkgs.add(pkg, blocker)
6783
6784                                         # Maybe the blocking package can be
6785                                         # unmerged to resolve this block.
6786                                         if parent.operation == "merge" and pkg.installed:
6787                                                 depends_on_order.add((pkg, parent))
6788                                                 continue
6789                                         elif parent.operation == "nomerge":
6790                                                 depends_on_order.add((parent, pkg))
6791                                                 continue
6792                                         # None of the above blocker resolution techniques apply,
6793                                         # so apparently this one is unresolvable.
6794                                         unresolved_blocks = True
6795
6796                                 # Make sure we don't unmerge any packages that have been pulled
6797                                 # into the graph.
6798                                 if not unresolved_blocks and depends_on_order:
6799                                         for inst_pkg, inst_task in depends_on_order:
6800                                                 if self.digraph.contains(inst_pkg) and \
6801                                                         self.digraph.parent_nodes(inst_pkg):
6802                                                         unresolved_blocks = True
6803                                                         break
6804
6805                                 if not unresolved_blocks and depends_on_order:
6806                                         for inst_pkg, inst_task in depends_on_order:
6807                                                 uninst_task = Package(built=inst_pkg.built,
6808                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6809                                                         metadata=inst_pkg.metadata,
6810                                                         operation="uninstall",
6811                                                         root_config=inst_pkg.root_config,
6812                                                         type_name=inst_pkg.type_name)
6813                                                 self._pkg_cache[uninst_task] = uninst_task
6814                                                 # Enforce correct merge order with a hard dep.
6815                                                 self.digraph.addnode(uninst_task, inst_task,
6816                                                         priority=BlockerDepPriority.instance)
6817                                                 # Count references to this blocker so that it can be
6818                                                 # invalidated after nodes referencing it have been
6819                                                 # merged.
6820                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6821                                 if not unresolved_blocks and not depends_on_order:
6822                                         self._irrelevant_blockers.add(blocker, parent)
6823                                         self._blocker_parents.remove_edge(blocker, parent)
6824                                         if not self._blocker_parents.parent_nodes(blocker):
6825                                                 self._blocker_parents.remove(blocker)
6826                                         if not self._blocker_parents.child_nodes(parent):
6827                                                 self._blocker_parents.remove(parent)
6828                                 if unresolved_blocks:
6829                                         self._unsolvable_blockers.add(blocker, parent)
6830
6831                 return True
6832
6833         def _accept_blocker_conflicts(self):
6834                 acceptable = False
6835                 for x in ("--buildpkgonly", "--fetchonly",
6836                         "--fetch-all-uri", "--nodeps"):
6837                         if x in self.myopts:
6838                                 acceptable = True
6839                                 break
6840                 return acceptable
6841
6842         def _merge_order_bias(self, mygraph):
6843                 """
6844                 For optimal leaf node selection, promote deep system runtime deps and
6845                 order nodes from highest to lowest overall reference count.
6846                 """
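                     # cmp_merge_preference below is a classic cmp()-style function
                     # (negative, zero, or positive); portage.util.cmp_sort_key
                     # adapts it into a key for list.sort(). Roughly: uninstalls
                     # sort last, deep system runtime deps sort first, and ties
                     # are broken by descending parent-node count.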
6847
6848                 node_info = {}
6849                 for node in mygraph.order:
6850                         node_info[node] = len(mygraph.parent_nodes(node))
6851                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6852
6853                 def cmp_merge_preference(node1, node2):
6854
6855                         if node1.operation == 'uninstall':
6856                                 if node2.operation == 'uninstall':
6857                                         return 0
6858                                 return 1
6859
6860                         if node2.operation == 'uninstall':
6861                                 if node1.operation == 'uninstall':
6862                                         return 0
6863                                 return -1
6864
6865                         node1_sys = node1 in deep_system_deps
6866                         node2_sys = node2 in deep_system_deps
6867                         if node1_sys != node2_sys:
6868                                 if node1_sys:
6869                                         return -1
6870                                 return 1
6871
6872                         return node_info[node2] - node_info[node1]
6873
6874                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6875
6876         def altlist(self, reversed=False):
6877
6878                 while self._serialized_tasks_cache is None:
6879                         self._resolve_conflicts()
6880                         try:
6881                                 self._serialized_tasks_cache, self._scheduler_graph = \
6882                                         self._serialize_tasks()
6883                         except self._serialize_tasks_retry:
6884                                 pass
6885
6886                 retlist = self._serialized_tasks_cache[:]
6887                 if reversed:
6888                         retlist.reverse()
6889                 return retlist
6890
6891         def schedulerGraph(self):
6892                 """
6893                 The scheduler graph is identical to the normal one except that
6894                 uninstall edges are reversed in specific cases that require
6895                 conflicting packages to be temporarily installed simultaneously.
6896                 This is intended for use by the Scheduler in its parallelization
6897                 logic. It ensures that temporary simultaneous installation of
6898                 conflicting packages is avoided when appropriate (especially for
6899                 !!atom blockers), but allowed in specific cases that require it.
6900
6901                 Note that this method calls break_refs() which alters the state of
6902                 internal Package instances such that this depgraph instance should
6903                 not be used to perform any more calculations.
6904                 """
6905                 if self._scheduler_graph is None:
6906                         self.altlist()
6907                 self.break_refs(self._scheduler_graph.order)
6908                 return self._scheduler_graph
6909
6910         def break_refs(self, nodes):
6911                 """
6912                 Take a mergelist like that returned from self.altlist() and
6913                 break any references that lead back to the depgraph. This is
6914                 useful if you want to hold references to packages without
6915                 also holding the depgraph on the heap.
6916                 """
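                     # Hypothetical caller-side usage:
                     #   mergelist = mydepgraph.altlist()
                     #   mydepgraph.break_refs(mergelist)
                     #   del mydepgraph  # Package instances no longer pin it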
6917                 for node in nodes:
6918                         if hasattr(node, "root_config"):
6919                                 # The FakeVartree references the _package_cache which
6920                                 # references the depgraph. So that Package instances don't
6921                                 # hold the depgraph and FakeVartree on the heap, replace
6922                                 # the RootConfig that references the FakeVartree with the
6923                                 # original RootConfig instance which references the actual
6924                                 # vartree.
6925                                 node.root_config = \
6926                                         self._trees_orig[node.root_config.root]["root_config"]
6927
6928         def _resolve_conflicts(self):
6929                 if not self._complete_graph():
6930                         raise self._unknown_internal_error()
6931
6932                 if not self.validate_blockers():
6933                         raise self._unknown_internal_error()
6934
6935                 if self._slot_collision_info:
6936                         self._process_slot_conflicts()
6937
6938         def _serialize_tasks(self):
6939
6940                 if "--debug" in self.myopts:
6941                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6942                         self.digraph.debug_print()
6943                         writemsg("\n", noiselevel=-1)
6944
6945                 scheduler_graph = self.digraph.copy()
6946                 mygraph=self.digraph.copy()
6947                 # Prune "nomerge" root nodes if nothing depends on them, since
6948                 # otherwise they slow down merge order calculation. Don't remove
6949                 # non-root nodes since they help optimize merge order in some cases
6950                 # such as revdep-rebuild.
6951                 removed_nodes = set()
6952                 while True:
6953                         for node in mygraph.root_nodes():
6954                                 if not isinstance(node, Package) or \
6955                                         node.installed or node.onlydeps:
6956                                         removed_nodes.add(node)
6957                         if removed_nodes:
6958                                 self.spinner.update()
6959                                 mygraph.difference_update(removed_nodes)
6960                         if not removed_nodes:
6961                                 break
6962                         removed_nodes.clear()
6963                 self._merge_order_bias(mygraph)
6964                 def cmp_circular_bias(n1, n2):
6965                         """
6966                         RDEPEND is stronger than PDEPEND and this function
6967                         measures such a strength bias within a circular
6968                         dependency relationship.
6969                         """
6970                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6971                                 ignore_priority=priority_range.ignore_medium_soft)
6972                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6973                                 ignore_priority=priority_range.ignore_medium_soft)
6974                         if n1_n2_medium == n2_n1_medium:
6975                                 return 0
6976                         elif n1_n2_medium:
6977                                 return 1
6978                         return -1
6979                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6980                 retlist=[]
6981                 # Contains uninstall tasks that have been scheduled to
6982                 # occur after overlapping blockers have been installed.
6983                 scheduled_uninstalls = set()
6984                 # Contains any Uninstall tasks that have been ignored
6985                 # in order to avoid the circular deps code path. These
6986                 # correspond to blocker conflicts that could not be
6987                 # resolved.
6988                 ignored_uninstall_tasks = set()
6989                 have_uninstall_task = False
6990                 complete = "complete" in self.myparams
6991                 asap_nodes = []
6992
6993                 def get_nodes(**kwargs):
6994                         """
6995                         Return leaf nodes, excluding unscheduled uninstall tasks
6996                         since those should be executed as late as possible.
6997                         """
6998                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6999                                 if isinstance(node, Package) and \
7000                                         (node.operation != "uninstall" or \
7001                                         node in scheduled_uninstalls)]
7002
7003                 # sys-apps/portage needs special treatment if ROOT="/"
7004                 running_root = self._running_root.root
7005                 from portage.const import PORTAGE_PACKAGE_ATOM
7006                 runtime_deps = InternalPackageSet(
7007                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7008                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7009                         PORTAGE_PACKAGE_ATOM)
7010                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7011                         PORTAGE_PACKAGE_ATOM)
7012
7013                 if running_portage:
7014                         running_portage = running_portage[0]
7015                 else:
7016                         running_portage = None
7017
7018                 if replacement_portage:
7019                         replacement_portage = replacement_portage[0]
7020                 else:
7021                         replacement_portage = None
7022
7023                 if replacement_portage == running_portage:
7024                         replacement_portage = None
7025
7026                 if replacement_portage is not None:
7027                         # update from running_portage to replacement_portage asap
7028                         asap_nodes.append(replacement_portage)
7029
7030                 if running_portage is not None:
7031                         try:
7032                                 portage_rdepend = self._select_atoms_highest_available(
7033                                         running_root, running_portage.metadata["RDEPEND"],
7034                                         myuse=running_portage.use.enabled,
7035                                         parent=running_portage, strict=False)
7036                         except portage.exception.InvalidDependString, e:
7037                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7038                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7039                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7040                                 del e
7041                                 portage_rdepend = []
7042                         runtime_deps.update(atom for atom in portage_rdepend \
7043                                 if not atom.startswith("!"))
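                     # runtime_deps now holds the sys-apps/portage atom plus the
                     # running instance's own RDEPEND atoms (blockers excluded);
                     # the uninstall sanity checks later in this method consult it
                     # so that nothing portage itself needs at runtime gets
                     # scheduled for removal.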
7044
7045                 def gather_deps(ignore_priority, mergeable_nodes,
7046                         selected_nodes, node):
7047                         """
7048                         Recursively gather a group of nodes that RDEPEND on
7049                         each other. This ensures that they are merged as a group
7050                         and get their RDEPENDs satisfied as soon as possible.
7051                         """
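                             # All-or-nothing: if any transitive child is not in
                             # mergeable_nodes, False propagates up and the caller
                             # discards selected_nodes. E.g. if hypothetical packages
                             # A and B RDEPEND on each other, both are selected
                             # together so neither is merged with an unsatisfied
                             # RDEPEND.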
7052                         if node in selected_nodes:
7053                                 return True
7054                         if node not in mergeable_nodes:
7055                                 return False
7056                         if node == replacement_portage and \
7057                                 mygraph.child_nodes(node,
7058                                 ignore_priority=priority_range.ignore_medium_soft):
7059                                 # Make sure that portage always has all of its
7060                                 # RDEPENDs installed first.
7061                                 return False
7062                         selected_nodes.add(node)
7063                         for child in mygraph.child_nodes(node,
7064                                 ignore_priority=ignore_priority):
7065                                 if not gather_deps(ignore_priority,
7066                                         mergeable_nodes, selected_nodes, child):
7067                                         return False
7068                         return True
7069
7070                 def ignore_uninst_or_med(priority):
7071                         if priority is BlockerDepPriority.instance:
7072                                 return True
7073                         return priority_range.ignore_medium(priority)
7074
7075                 def ignore_uninst_or_med_soft(priority):
7076                         if priority is BlockerDepPriority.instance:
7077                                 return True
7078                         return priority_range.ignore_medium_soft(priority)
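                     # Both helpers widen the priority_range predicates so that the
                     # hard BlockerDepPriority edges added for scheduled uninstalls
                     # are ignored as well when searching for mergeable leaf nodes.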
7079
7080                 tree_mode = "--tree" in self.myopts
7081                 # Tracks whether or not the current iteration should prefer asap_nodes
7082                 # if available.  This is set to False when the previous iteration
7083                 # failed to select any nodes.  It is reset whenever nodes are
7084                 # successfully selected.
7085                 prefer_asap = True
7086
7087                 # Controls whether or not the current iteration should drop edges that
7088                 # are "satisfied" by installed packages, in order to solve circular
7089                 # dependencies. The deep runtime dependencies of installed packages are
7090                 # not checked in this case (bug #199856), so it must be avoided
7091                 # whenever possible.
7092                 drop_satisfied = False
7093
7094                 # State of variables for successive iterations that loosen the
7095                 # criteria for node selection.
7096                 #
7097                 # iteration   prefer_asap   drop_satisfied
7098                 # 1           True          False
7099                 # 2           False         False
7100                 # 3           False         True
7101                 #
7102                 # If no nodes are selected on the last iteration, it is due to
7103                 # unresolved blockers or circular dependencies.
7104
7105                 while not mygraph.empty():
7106                         self.spinner.update()
7107                         selected_nodes = None
7108                         ignore_priority = None
7109                         if drop_satisfied or (prefer_asap and asap_nodes):
7110                                 priority_range = DepPrioritySatisfiedRange
7111                         else:
7112                                 priority_range = DepPriorityNormalRange
7113                         if prefer_asap and asap_nodes:
7114                                 # ASAP nodes are merged before their soft deps. Go ahead and
7115                                 # select root nodes here if necessary, since it's typical for
7116                                 # the parent to have been removed from the graph already.
7117                                 asap_nodes = [node for node in asap_nodes \
7118                                         if mygraph.contains(node)]
7119                                 for node in asap_nodes:
7120                                         if not mygraph.child_nodes(node,
7121                                                 ignore_priority=priority_range.ignore_soft):
7122                                                 selected_nodes = [node]
7123                                                 asap_nodes.remove(node)
7124                                                 break
7125                         if not selected_nodes and \
7126                                 not (prefer_asap and asap_nodes):
7127                                 for i in xrange(priority_range.NONE,
7128                                         priority_range.MEDIUM_SOFT + 1):
7129                                         ignore_priority = priority_range.ignore_priority[i]
7130                                         nodes = get_nodes(ignore_priority=ignore_priority)
7131                                         if nodes:
7132                                                 # If there is a mix of uninstall nodes with other
7133                                                 # types, save the uninstall nodes for later since
7134                                                 # sometimes a merge node will render an uninstall
7135                                                 # node unnecessary (due to occupying the same slot),
7136                                                 # and we want to avoid executing a separate uninstall
7137                                                 # task in that case.
7138                                                 if len(nodes) > 1:
7139                                                         good_uninstalls = []
7140                                                         with_some_uninstalls_excluded = []
7141                                                         for node in nodes:
7142                                                                 if node.operation == "uninstall":
7143                                                                         slot_node = self.mydbapi[node.root
7144                                                                                 ].match_pkgs(node.slot_atom)
7145                                                                         if slot_node and \
7146                                                                                 slot_node[0].operation == "merge":
7147                                                                                 continue
7148                                                                         good_uninstalls.append(node)
7149                                                                 with_some_uninstalls_excluded.append(node)
7150                                                         if good_uninstalls:
7151                                                                 nodes = good_uninstalls
7152                                                         elif with_some_uninstalls_excluded:
7153                                                                 nodes = with_some_uninstalls_excluded
7154                                                         # Otherwise every node is an uninstall made
7155                                                         # redundant by a merge, so keep the original list.
7156
7157                                                 if ignore_priority is None and not tree_mode:
7158                                                         # Greedily pop all of these nodes since no
7159                                                         # relationship has been ignored. This optimization
7160                                                         # destroys --tree output, so it's disabled in tree
7161                                                         # mode.
7162                                                         selected_nodes = nodes
7163                                                 else:
7164                                                         # For optimal merge order:
7165                                                         #  * Only pop one node.
7166                                                         #  * Removing a root node (node without a parent)
7167                                                         #    will not produce a leaf node, so avoid it.
7168                                                         #  * It's normal for a selected uninstall to be a
7169                                                         #    root node, so don't check them for parents.
7170                                                         for node in nodes:
7171                                                                 if node.operation == "uninstall" or \
7172                                                                         mygraph.parent_nodes(node):
7173                                                                         selected_nodes = [node]
7174                                                                         break
7175
7176                                                 if selected_nodes:
7177                                                         break
7178
7179                         if not selected_nodes:
7180                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7181                                 if nodes:
7182                                         mergeable_nodes = set(nodes)
7183                                         if prefer_asap and asap_nodes:
7184                                                 nodes = asap_nodes
7185                                         for i in xrange(priority_range.SOFT,
7186                                                 priority_range.MEDIUM_SOFT + 1):
7187                                                 ignore_priority = priority_range.ignore_priority[i]
7188                                                 for node in nodes:
7189                                                         if not mygraph.parent_nodes(node):
7190                                                                 continue
7191                                                         selected_nodes = set()
7192                                                         if gather_deps(ignore_priority,
7193                                                                 mergeable_nodes, selected_nodes, node):
7194                                                                 break
7195                                                         else:
7196                                                                 selected_nodes = None
7197                                                 if selected_nodes:
7198                                                         break
7199
7200                                         if prefer_asap and asap_nodes and not selected_nodes:
7201                                                 # We failed to find any asap nodes to merge, so ignore
7202                                                 # them for the next iteration.
7203                                                 prefer_asap = False
7204                                                 continue
7205
7206                         if selected_nodes and ignore_priority is not None:
7207                                 # Try to merge ignored medium_soft deps as soon as possible
7208                                 # if they're not satisfied by installed packages.
7209                                 for node in selected_nodes:
7210                                         children = set(mygraph.child_nodes(node))
7211                                         soft = children.difference(
7212                                                 mygraph.child_nodes(node,
7213                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7214                                         medium_soft = children.difference(
7215                                                 mygraph.child_nodes(node,
7216                                                         ignore_priority = \
7217                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7218                                         medium_soft.difference_update(soft)
7219                                         for child in medium_soft:
7220                                                 if child in selected_nodes:
7221                                                         continue
7222                                                 if child in asap_nodes:
7223                                                         continue
7224                                                 asap_nodes.append(child)
7225
7226                         if selected_nodes and len(selected_nodes) > 1:
7227                                 if not isinstance(selected_nodes, list):
7228                                         selected_nodes = list(selected_nodes)
7229                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7230
7231                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7232                                 # An Uninstall task needs to be executed in order to
7233                                 # avoid conflict if possible.
7234
7235                                 if drop_satisfied:
7236                                         priority_range = DepPrioritySatisfiedRange
7237                                 else:
7238                                         priority_range = DepPriorityNormalRange
7239
7240                                 mergeable_nodes = get_nodes(
7241                                         ignore_priority=ignore_uninst_or_med)
7242
7243                                 min_parent_deps = None
7244                                 uninst_task = None
7245                                 for task in myblocker_uninstalls.leaf_nodes():
7246                                         # Do some sanity checks so that system or world packages
7247                                         # don't get uninstalled inappropriately here (only really
7248                                         # necessary when --complete-graph has not been enabled).
7249
7250                                         if task in ignored_uninstall_tasks:
7251                                                 continue
7252
7253                                         if task in scheduled_uninstalls:
7254                                                 # It's been scheduled but it hasn't
7255                                                 # been executed yet due to dependence
7256                                                 # on installation of blocking packages.
7257                                                 continue
7258
7259                                         root_config = self.roots[task.root]
7260                                         inst_pkg = self._pkg_cache[
7261                                                 ("installed", task.root, task.cpv, "nomerge")]
7262
7263                                         if self.digraph.contains(inst_pkg):
7264                                                 continue
7265
7266                                         forbid_overlap = False
7267                                         heuristic_overlap = False
7268                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7269                                                 if blocker.eapi in ("0", "1"):
7270                                                         heuristic_overlap = True
7271                                                 elif blocker.atom.blocker.overlap.forbid:
7272                                                         forbid_overlap = True
7273                                                         break
7274                                         if forbid_overlap and running_root == task.root:
7275                                                 continue
7276
7277                                         if heuristic_overlap and running_root == task.root:
7278                                                 # Never uninstall sys-apps/portage or its essential
7279                                                 # dependencies, except through replacement.
7280                                                 try:
7281                                                         runtime_dep_atoms = \
7282                                                                 list(runtime_deps.iterAtomsForPackage(task))
7283                                                 except portage.exception.InvalidDependString, e:
7284                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7285                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7286                                                                 (task.root, task.cpv, e), noiselevel=-1)
7287                                                         del e
7288                                                         continue
7289
7290                                                 # Don't uninstall a runtime dep if it appears
7291                                                 # to be the only suitable one installed.
7292                                                 skip = False
7293                                                 vardb = root_config.trees["vartree"].dbapi
7294                                                 for atom in runtime_dep_atoms:
7295                                                         other_version = None
7296                                                         for pkg in vardb.match_pkgs(atom):
7297                                                                 if pkg.cpv == task.cpv and \
7298                                                                         pkg.metadata["COUNTER"] == \
7299                                                                         task.metadata["COUNTER"]:
7300                                                                         continue
7301                                                                 other_version = pkg
7302                                                                 break
7303                                                         if other_version is None:
7304                                                                 skip = True
7305                                                                 break
7306                                                 if skip:
7307                                                         continue
7308
7309                                                 # For packages in the system set, don't take
7310                                                 # any chances. If the conflict can't be resolved
7311                                                 # by a normal replacement operation then abort.
7312                                                 skip = False
7313                                                 try:
7314                                                         for atom in root_config.sets[
7315                                                                 "system"].iterAtomsForPackage(task):
7316                                                                 skip = True
7317                                                                 break
7318                                                 except portage.exception.InvalidDependString, e:
7319                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7320                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7321                                                                 (task.root, task.cpv, e), noiselevel=-1)
7322                                                         del e
7323                                                         skip = True
7324                                                 if skip:
7325                                                         continue
7326
7327                                         # Note that the world check isn't always
7328                                         # necessary since self._complete_graph() will
7329                                         # add all packages from the system and world sets to the
7330                                         # graph. This just allows unresolved conflicts to be
7331                                         # detected as early as possible, which makes it possible
7332                                         # to avoid calling self._complete_graph() when it is
7333                                         # unnecessary due to blockers triggering an abort.
7334                                         if not complete:
7335                                                 # For packages in the world set, go ahead and uninstall
7336                                                 # when necessary, as long as the atom will be satisfied
7337                                                 # in the final state.
7338                                                 graph_db = self.mydbapi[task.root]
7339                                                 skip = False
7340                                                 try:
7341                                                         for atom in root_config.sets[
7342                                                                 "world"].iterAtomsForPackage(task):
7343                                                                 satisfied = False
7344                                                                 for pkg in graph_db.match_pkgs(atom):
7345                                                                         if pkg == inst_pkg:
7346                                                                                 continue
7347                                                                         satisfied = True
7348                                                                         break
7349                                                                 if not satisfied:
7350                                                                         skip = True
7351                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7352                                                                         break
7353                                                 except portage.exception.InvalidDependString, e:
7354                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7355                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7356                                                                 (task.root, task.cpv, e), noiselevel=-1)
7357                                                         del e
7358                                                         skip = True
7359                                                 if skip:
7360                                                         continue
7361
7362                                         # Check the deps of parent nodes to ensure that
7363                                         # the chosen task produces a leaf node. Maybe
7364                                         # this can be optimized some more to make the
7365                                         # best possible choice, but the current algorithm
7366                                         # is simple and should be near optimal for most
7367                                         # common cases.
7368                                         mergeable_parent = False
7369                                         parent_deps = set()
7370                                         for parent in mygraph.parent_nodes(task):
7371                                                 parent_deps.update(mygraph.child_nodes(parent,
7372                                                         ignore_priority=priority_range.ignore_medium_soft))
7373                                                 if parent in mergeable_nodes and \
7374                                                         gather_deps(ignore_uninst_or_med_soft,
7375                                                         mergeable_nodes, set(), parent):
7376                                                         mergeable_parent = True
7377
7378                                         if not mergeable_parent:
7379                                                 continue
7380
7381                                         parent_deps.remove(task)
7382                                         if min_parent_deps is None or \
7383                                                 len(parent_deps) < min_parent_deps:
7384                                                 min_parent_deps = len(parent_deps)
7385                                                 uninst_task = task
7386
7387                                 if uninst_task is not None:
7388                                         # The uninstall is performed only after blocking
7389                                         # packages have been merged on top of it. File
7390                                         # collisions between blocking packages are detected
7391                                         # and removed from the list of files to be uninstalled.
7392                                         scheduled_uninstalls.add(uninst_task)
7393                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7394
7395                                         # Reverse the parent -> uninstall edges since we want
7396                                         # to do the uninstall after blocking packages have
7397                                         # been merged on top of it.
7398                                         mygraph.remove(uninst_task)
7399                                         for blocked_pkg in parent_nodes:
7400                                                 mygraph.add(blocked_pkg, uninst_task,
7401                                                         priority=BlockerDepPriority.instance)
7402                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7403                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7404                                                         priority=BlockerDepPriority.instance)
7405
7406                                         # Reset the state variables for leaf node selection and
7407                                         # continue trying to select leaf nodes.
7408                                         prefer_asap = True
7409                                         drop_satisfied = False
7410                                         continue
7411
7412                         if not selected_nodes:
7413                                 # Only select root nodes as a last resort. This case should
7414                                 # only trigger when the graph is nearly empty and the only
7415                                 # remaining nodes are isolated (no parents or children). Since
7416                                 # the nodes must be isolated, ignore_priority is not needed.
7417                                 selected_nodes = get_nodes()
7418
7419                         if not selected_nodes and not drop_satisfied:
7420                                 drop_satisfied = True
7421                                 continue
7422
7423                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7424                                 # If possible, drop an uninstall task here in order to avoid
7425                                 # the circular deps code path. The corresponding blocker will
7426                                 # still be counted as an unresolved conflict.
7427                                 uninst_task = None
7428                                 for node in myblocker_uninstalls.leaf_nodes():
7429                                         try:
7430                                                 mygraph.remove(node)
7431                                         except KeyError:
7432                                                 pass
7433                                         else:
7434                                                 uninst_task = node
7435                                                 ignored_uninstall_tasks.add(node)
7436                                                 break
7437
7438                                 if uninst_task is not None:
7439                                         # Reset the state variables for leaf node selection and
7440                                         # continue trying to select leaf nodes.
7441                                         prefer_asap = True
7442                                         drop_satisfied = False
7443                                         continue
7444
7445                         if not selected_nodes:
7446                                 self._circular_deps_for_display = mygraph
7447                                 raise self._unknown_internal_error()
7448
7449                         # At this point, we've succeeded in selecting one or more nodes, so
7450                         # reset state variables for leaf node selection.
7451                         prefer_asap = True
7452                         drop_satisfied = False
7453
7454                         mygraph.difference_update(selected_nodes)
7455
7456                         for node in selected_nodes:
7457                                 if isinstance(node, Package) and \
7458                                         node.operation == "nomerge":
7459                                         continue
7460
7461                                 # Handle interactions between blockers
7462                                 # and uninstallation tasks.
7463                                 solved_blockers = set()
7464                                 uninst_task = None
7465                                 if isinstance(node, Package) and \
7466                                         "uninstall" == node.operation:
7467                                         have_uninstall_task = True
7468                                         uninst_task = node
7469                                 else:
7470                                         vardb = self.trees[node.root]["vartree"].dbapi
7471                                         previous_cpv = vardb.match(node.slot_atom)
7472                                         if previous_cpv:
7473                                                 # The package will be replaced by this one, so remove
7474                                                 # the corresponding Uninstall task if necessary.
7475                                                 previous_cpv = previous_cpv[0]
7476                                                 uninst_task = \
7477                                                         ("installed", node.root, previous_cpv, "uninstall")
7478                                                 try:
7479                                                         mygraph.remove(uninst_task)
7480                                                 except KeyError:
7481                                                         pass
7482
7483                                 if uninst_task is not None and \
7484                                         uninst_task not in ignored_uninstall_tasks and \
7485                                         myblocker_uninstalls.contains(uninst_task):
7486                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7487                                         myblocker_uninstalls.remove(uninst_task)
7488                                         # Discard any blockers that this Uninstall solves.
7489                                         for blocker in blocker_nodes:
7490                                                 if not myblocker_uninstalls.child_nodes(blocker):
7491                                                         myblocker_uninstalls.remove(blocker)
7492                                                         solved_blockers.add(blocker)
7493
7494                                 retlist.append(node)
7495
7496                                 if (isinstance(node, Package) and \
7497                                         "uninstall" == node.operation) or \
7498                                         (uninst_task is not None and \
7499                                         uninst_task in scheduled_uninstalls):
7500                                         # Include satisfied blockers in the merge list
7501                                         # since the user might be interested, and it also
7502                                         # serves as an indicator that blocking packages
7503                                         # will be temporarily installed simultaneously.
7504                                         for blocker in solved_blockers:
7505                                                 retlist.append(Blocker(atom=blocker.atom,
7506                                                         root=blocker.root, eapi=blocker.eapi,
7507                                                         satisfied=True))
7508
7509                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7510                 for node in myblocker_uninstalls.root_nodes():
7511                         unsolvable_blockers.add(node)
7512
7513                 for blocker in unsolvable_blockers:
7514                         retlist.append(blocker)
7515
7516                 # If any Uninstall tasks need to be executed in order
7517                 # to avoid a conflict, complete the graph with any
7518                 # dependencies that may have been initially
7519                 # neglected (to ensure that unsafe Uninstall tasks
7520                 # are properly identified and blocked from execution).
7521                 if have_uninstall_task and \
7522                         not complete and \
7523                         not unsolvable_blockers:
7524                         self.myparams.add("complete")
7525                         raise self._serialize_tasks_retry("")
7526
7527                 if unsolvable_blockers and \
7528                         not self._accept_blocker_conflicts():
7529                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7530                         self._serialized_tasks_cache = retlist[:]
7531                         self._scheduler_graph = scheduler_graph
7532                         raise self._unknown_internal_error()
7533
7534                 if self._slot_collision_info and \
7535                         not self._accept_blocker_conflicts():
7536                         self._serialized_tasks_cache = retlist[:]
7537                         self._scheduler_graph = scheduler_graph
7538                         raise self._unknown_internal_error()
7539
7540                 return retlist, scheduler_graph
7541
7542         def _show_circular_deps(self, mygraph):
7543                 # No leaf nodes are available, so we have a circular
7544                 # dependency panic situation.  Reduce the noise level to a
7545                 # minimum via repeated elimination of root nodes since they
7546                 # have no parents and thus cannot be part of a cycle.
7547                 while True:
7548                         root_nodes = mygraph.root_nodes(
7549                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7550                         if not root_nodes:
7551                                 break
7552                         mygraph.difference_update(root_nodes)
7553                 # Display the USE flags that are enabled on nodes that are part
7554                 # of dependency cycles in case that helps the user decide to
7555                 # disable some of them.
7556                 display_order = []
7557                 tempgraph = mygraph.copy()
7558                 while not tempgraph.empty():
7559                         nodes = tempgraph.leaf_nodes()
7560                         if not nodes:
7561                                 node = tempgraph.order[0]
7562                         else:
7563                                 node = nodes[0]
7564                         display_order.append(node)
7565                         tempgraph.remove(node)
7566                 display_order.reverse()
7567                 self.myopts.pop("--quiet", None)
7568                 self.myopts.pop("--verbose", None)
7569                 self.myopts["--tree"] = True
7570                 portage.writemsg("\n\n", noiselevel=-1)
7571                 self.display(display_order)
7572                 prefix = colorize("BAD", " * ")
7573                 portage.writemsg("\n", noiselevel=-1)
7574                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7575                         noiselevel=-1)
7576                 portage.writemsg("\n", noiselevel=-1)
7577                 mygraph.debug_print()
7578                 portage.writemsg("\n", noiselevel=-1)
7579                 portage.writemsg(prefix + "Note that circular dependencies " + \
7580                         "can often be avoided by temporarily\n", noiselevel=-1)
7581                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7582                         "optional dependencies.\n", noiselevel=-1)
7583
7584         def _show_merge_list(self):
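                             # Re-display the cached merge list unless it has already been
                             # shown (in either forward or reverse order).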
7585                 if self._serialized_tasks_cache is not None and \
7586                         not (self._displayed_list and \
7587                         (self._displayed_list == self._serialized_tasks_cache or \
7588                         self._displayed_list == \
7589                                 list(reversed(self._serialized_tasks_cache)))):
7590                         display_list = self._serialized_tasks_cache[:]
7591                         if "--tree" in self.myopts:
7592                                 display_list.reverse()
7593                         self.display(display_list)
7594
7595         def _show_unsatisfied_blockers(self, blockers):
7596                 self._show_merge_list()
7597                 msg = "Error: The above package list contains " + \
7598                         "packages which cannot be installed " + \
7599                         "at the same time on the same system."
7600                 prefix = colorize("BAD", " * ")
7601                 from textwrap import wrap
7602                 portage.writemsg("\n", noiselevel=-1)
7603                 for line in wrap(msg, 70):
7604                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7605
7606                 # Display the conflicting packages along with the packages
7607                 # that pulled them in. This is helpful for troubleshooting
7608                 # cases in which blockers don't solve automatically and
7609                 # the reasons are not apparent from the normal merge list
7610                 # display.
7611
7612                 conflict_pkgs = {}
7613                 for blocker in blockers:
7614                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7615                                 self._blocker_parents.parent_nodes(blocker)):
7616                                 parent_atoms = self._parent_atoms.get(pkg)
7617                                 if not parent_atoms:
7618                                         atom = self._blocked_world_pkgs.get(pkg)
7619                                         if atom is not None:
7620                                                 parent_atoms = set([("@world", atom)])
7621                                 if parent_atoms:
7622                                         conflict_pkgs[pkg] = parent_atoms
7623
7624                 if conflict_pkgs:
7625                         # Reduce noise by pruning packages that are only
7626                         # pulled in by other conflict packages.
7627                         pruned_pkgs = set()
7628                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7629                                 relevant_parent = False
7630                                 for parent, atom in parent_atoms:
7631                                         if parent not in conflict_pkgs:
7632                                                 relevant_parent = True
7633                                                 break
7634                                 if not relevant_parent:
7635                                         pruned_pkgs.add(pkg)
7636                         for pkg in pruned_pkgs:
7637                                 del conflict_pkgs[pkg]
7638
7639                 if conflict_pkgs:
7640                         msg = []
7641                         msg.append("\n")
7642                         indent = "  "
7643                         # Max number of parents shown, to avoid flooding the display.
7644                         max_parents = 3
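                             # A hypothetical sketch of the output built below (actual package
                             # and parent representations come from their __str__ methods):
                             #   <pkg> pulled in by
                             #     <atom> required by <parent>
                             #     (and N more)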
7645                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7646
7647                                 pruned_list = set()
7648
7649                                 # Prefer packages that are not directly involved in a conflict.
7650                                 for parent_atom in parent_atoms:
7651                                         if len(pruned_list) >= max_parents:
7652                                                 break
7653                                         parent, atom = parent_atom
7654                                         if parent not in conflict_pkgs:
7655                                                 pruned_list.add(parent_atom)
7656
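                                     # Fill any remaining slots with the other parents, which are
                                     # themselves part of the conflict.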
7657                                 for parent_atom in parent_atoms:
7658                                         if len(pruned_list) >= max_parents:
7659                                                 break
7660                                         pruned_list.add(parent_atom)
7661
7662                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7663                                 msg.append(indent + "%s pulled in by\n" % pkg)
7664
7665                                 for parent_atom in pruned_list:
7666                                         parent, atom = parent_atom
7667                                         msg.append(2*indent)
7668                                         if isinstance(parent,
7669                                                 (PackageArg, AtomArg)):
7670                                                 # For PackageArg and AtomArg types, it's
7671                                                 # redundant to display the atom attribute.
7672                                                 msg.append(str(parent))
7673                                         else:
7674                                                 # Display the specific atom from SetArg or
7675                                                 # Package types.
7676                                                 msg.append("%s required by %s" % (atom, parent))
7677                                         msg.append("\n")
7678
7679                                 if omitted_parents:
7680                                         msg.append(2*indent)
7681                                         msg.append("(and %d more)\n" % omitted_parents)
7682
7683                                 msg.append("\n")
7684
7685                         sys.stderr.write("".join(msg))
7686                         sys.stderr.flush()
7687
7688                 if "--quiet" not in self.myopts:
7689                         show_blocker_docs_link()
7690
7691         def display(self, mylist, favorites=[], verbosity=None):
7692
7693                 # This is used to prevent display_problems() from
7694                 # redundantly displaying this exact same merge list
7695                 # again via _show_merge_list().
7696                 self._displayed_list = mylist
7697
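                     # Verbosity levels: 1 = --quiet, 2 = default, 3 = --verbose.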
7698                 if verbosity is None:
7699                         verbosity = ("--quiet" in self.myopts and 1 or \
7700                                 "--verbose" in self.myopts and 3 or 2)
7701                 favorites_set = InternalPackageSet(favorites)
7702                 oneshot = "--oneshot" in self.myopts or \
7703                         "--onlydeps" in self.myopts
7704                 columns = "--columns" in self.myopts
7705                 changelogs = []
7706                 p = []
7707                 blockers = []
7708
7709                 counters = PackageCounters()
7710
7711                 if verbosity == 1 and "--verbose" not in self.myopts:
7712                         def create_use_string(*args):
7713                                 return ""
7714                 else:
7715                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7716                                 old_iuse, old_use,
7717                                 is_new, reinst_flags,
7718                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7719                                 alphabetical=("--alphabetical" in self.myopts)):
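                                     # Build the colorized USE flag string for this package. A "*"
                                     # suffix marks a flag whose state changed relative to the
                                     # installed version, "%" marks a flag added to or dropped from
                                     # IUSE, and parentheses mark forced/masked flags (as well as
                                     # flags dropped from IUSE).
                                     # Hypothetical example of the result: USE="ssl* -X% (-doc)"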
7720                                 enabled = []
7721                                 if alphabetical:
7722                                         disabled = enabled
7723                                         removed = enabled
7724                                 else:
7725                                         disabled = []
7726                                         removed = []
7727                                 cur_iuse = set(cur_iuse)
7728                                 enabled_flags = cur_iuse.intersection(cur_use)
7729                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7730                                 any_iuse = cur_iuse.union(old_iuse)
7731                                 any_iuse = list(any_iuse)
7732                                 any_iuse.sort()
7733                                 for flag in any_iuse:
7734                                         flag_str = None
7735                                         isEnabled = False
7736                                         reinst_flag = reinst_flags and flag in reinst_flags
7737                                         if flag in enabled_flags:
7738                                                 isEnabled = True
7739                                                 if is_new or flag in old_use and \
7740                                                         (all_flags or reinst_flag):
7741                                                         flag_str = red(flag)
7742                                                 elif flag not in old_iuse:
7743                                                         flag_str = yellow(flag) + "%*"
7744                                                 elif flag not in old_use:
7745                                                         flag_str = green(flag) + "*"
7746                                         elif flag in removed_iuse:
7747                                                 if all_flags or reinst_flag:
7748                                                         flag_str = yellow("-" + flag) + "%"
7749                                                         if flag in old_use:
7750                                                                 flag_str += "*"
7751                                                         flag_str = "(" + flag_str + ")"
7752                                                         removed.append(flag_str)
7753                                                 continue
7754                                         else:
7755                                                 if is_new or flag in old_iuse and \
7756                                                         flag not in old_use and \
7757                                                         (all_flags or reinst_flag):
7758                                                         flag_str = blue("-" + flag)
7759                                                 elif flag not in old_iuse:
7760                                                         flag_str = yellow("-" + flag)
7761                                                         if flag not in iuse_forced:
7762                                                                 flag_str += "%"
7763                                                 elif flag in old_use:
7764                                                         flag_str = green("-" + flag) + "*"
7765                                         if flag_str:
7766                                                 if flag in iuse_forced:
7767                                                         flag_str = "(" + flag_str + ")"
7768                                                 if isEnabled:
7769                                                         enabled.append(flag_str)
7770                                                 else:
7771                                                         disabled.append(flag_str)
7772
7773                                 if alphabetical:
7774                                         ret = " ".join(enabled)
7775                                 else:
7776                                         ret = " ".join(enabled + disabled + removed)
7777                                 if ret:
7778                                         ret = '%s="%s" ' % (name, ret)
7779                                 return ret
7780
7781                 repo_display = RepoDisplay(self.roots)
7782
7783                 tree_nodes = []
7784                 display_list = []
7785                 mygraph = self.digraph.copy()
7786
7787                 # If there are any Uninstall instances, add the corresponding
7788                 # blockers to the digraph (useful for --tree display).
7789
7790                 executed_uninstalls = set(node for node in mylist \
7791                         if isinstance(node, Package) and node.operation == "unmerge")
7792
7793                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7794                         uninstall_parents = \
7795                                 self._blocker_uninstalls.parent_nodes(uninstall)
7796                         if not uninstall_parents:
7797                                 continue
7798
7799                         # Remove the corresponding "nomerge" node and substitute
7800                         # the Uninstall node.
7801                         inst_pkg = self._pkg_cache[
7802                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7803                         try:
7804                                 mygraph.remove(inst_pkg)
7805                         except KeyError:
7806                                 pass
7807
7808                         try:
7809                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7810                         except KeyError:
7811                                 inst_pkg_blockers = []
7812
7813                         # Break the Package -> Uninstall edges.
7814                         mygraph.remove(uninstall)
7815
7816                         # Resolution of a package's blockers
7817                        # depends on its own uninstallation.
7818                         for blocker in inst_pkg_blockers:
7819                                 mygraph.add(uninstall, blocker)
7820
7821                         # Expand Package -> Uninstall edges into
7822                         # Package -> Blocker -> Uninstall edges.
7823                         for blocker in uninstall_parents:
7824                                 mygraph.add(uninstall, blocker)
7825                                 for parent in self._blocker_parents.parent_nodes(blocker):
7826                                         if parent != inst_pkg:
7827                                                 mygraph.add(blocker, parent)
7828
7829                         # If the uninstall task did not need to be executed because
7830                         # of an upgrade, display Blocker -> Upgrade edges since the
7831                         # corresponding Blocker -> Uninstall edges will not be shown.
7832                         upgrade_node = \
7833                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7834                         if upgrade_node is not None and \
7835                                 uninstall not in executed_uninstalls:
7836                                 for blocker in uninstall_parents:
7837                                         mygraph.add(upgrade_node, blocker)
7838
7839                 unsatisfied_blockers = []
7840                 i = 0
7841                 depth = 0
7842                 shown_edges = set()
7843                 for x in mylist:
7844                         if isinstance(x, Blocker) and not x.satisfied:
7845                                 unsatisfied_blockers.append(x)
7846                                 continue
7847                         graph_key = x
7848                         if "--tree" in self.myopts:
7849                                 depth = len(tree_nodes)
7850                                 while depth and graph_key not in \
7851                                         mygraph.child_nodes(tree_nodes[depth-1]):
7852                                                 depth -= 1
7853                                 if depth:
7854                                         tree_nodes = tree_nodes[:depth]
7855                                         tree_nodes.append(graph_key)
7856                                         display_list.append((x, depth, True))
7857                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7858                                 else:
7859                                         traversed_nodes = set() # prevent endless cycles
7860                                         traversed_nodes.add(graph_key)
7861                                         def add_parents(current_node, ordered):
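                                                     # Recursively climb to the parents of current_node so
                                                     # that the --tree display shows it beneath the parent
                                                     # that pulled it in, using traversed_nodes and
                                                     # shown_edges to avoid looping on dependency cycles.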
7862                                                 parent_nodes = None
7863                                                 # Do not traverse to parents if this node is
7864                                                 # an argument or a direct member of a set that has
7865                                                 # been specified as an argument (system or world).
7866                                                 if current_node not in self._set_nodes:
7867                                                         parent_nodes = mygraph.parent_nodes(current_node)
7868                                                 if parent_nodes:
7869                                                         child_nodes = set(mygraph.child_nodes(current_node))
7870                                                         selected_parent = None
7871                                                         # First, try to avoid a direct cycle.
7872                                                         for node in parent_nodes:
7873                                                                 if not isinstance(node, (Blocker, Package)):
7874                                                                         continue
7875                                                                 if node not in traversed_nodes and \
7876                                                                         node not in child_nodes:
7877                                                                         edge = (current_node, node)
7878                                                                         if edge in shown_edges:
7879                                                                                 continue
7880                                                                         selected_parent = node
7881                                                                         break
7882                                                         if not selected_parent:
7883                                                                 # A direct cycle is unavoidable.
7884                                                                 for node in parent_nodes:
7885                                                                         if not isinstance(node, (Blocker, Package)):
7886                                                                                 continue
7887                                                                         if node not in traversed_nodes:
7888                                                                                 edge = (current_node, node)
7889                                                                                 if edge in shown_edges:
7890                                                                                         continue
7891                                                                                 selected_parent = node
7892                                                                                 break
7893                                                         if selected_parent:
7894                                                                 shown_edges.add((current_node, selected_parent))
7895                                                                 traversed_nodes.add(selected_parent)
7896                                                                 add_parents(selected_parent, False)
7897                                                 display_list.append((current_node,
7898                                                         len(tree_nodes), ordered))
7899                                                 tree_nodes.append(current_node)
7900                                         tree_nodes = []
7901                                         add_parents(graph_key, True)
7902                         else:
7903                                 display_list.append((x, depth, True))
7904                 mylist = display_list
7905                 for x in unsatisfied_blockers:
7906                         mylist.append((x, 0, True))
7907
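                     # Walk the display list backwards, dropping consecutive duplicates
                     # introduced while filling in the tree, along with "nomerge" or
                     # unordered ancestors that are not needed to show the path to a
                     # deeper merge.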
7908                 last_merge_depth = 0
7909                 for i in xrange(len(mylist)-1,-1,-1):
7910                         graph_key, depth, ordered = mylist[i]
7911                         if not ordered and depth == 0 and i > 0 \
7912                                 and graph_key == mylist[i-1][0] and \
7913                                 mylist[i-1][1] == 0:
7914                                 # An ordered node got a consecutive duplicate when the tree was
7915                                 # being filled in.
7916                                 del mylist[i]
7917                                 continue
7918                         if ordered and graph_key[-1] != "nomerge":
7919                                 last_merge_depth = depth
7920                                 continue
7921                         if depth >= last_merge_depth or \
7922                                 i < len(mylist) - 1 and \
7923                                 depth >= mylist[i+1][1]:
7924                                         del mylist[i]
7925
7926                 from portage import flatten
7927                 from portage.dep import use_reduce, paren_reduce
7928                 # Files-to-fetch list - avoids counting the same file twice
7929                 # in the size display (verbose mode).
7930                 myfetchlist = []
7931
7932                 # Use this set to detect when all the "repoadd" strings are "[0]"
7933                 # and disable the entire repo display in this case.
7934                 repoadd_set = set()
7935
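                             # Render one line per merge-list entry: Blocker entries get a
                             # "B"/"b" marker, while package entries get status letters
                             # (N = new, NS = new slot, U = update, UD = downgrade in slot,
                             # R = reinstall) along with fetch-restriction, USE and
                             # repository annotations.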
7936                 for mylist_index in xrange(len(mylist)):
7937                         x, depth, ordered = mylist[mylist_index]
7938                         pkg_type = x[0]
7939                         myroot = x[1]
7940                         pkg_key = x[2]
7941                         portdb = self.trees[myroot]["porttree"].dbapi
7942                         bindb  = self.trees[myroot]["bintree"].dbapi
7943                         vardb = self.trees[myroot]["vartree"].dbapi
7944                         vartree = self.trees[myroot]["vartree"]
7945                         pkgsettings = self.pkgsettings[myroot]
7946
7947                         fetch = " "
7948                         indent = " " * depth
7949
7950                         if isinstance(x, Blocker):
7951                                 if x.satisfied:
7952                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7953                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7954                                 else:
7955                                         blocker_style = "PKG_BLOCKER"
7956                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7957                                 if ordered:
7958                                         counters.blocks += 1
7959                                         if x.satisfied:
7960                                                 counters.blocks_satisfied += 1
7961                                 resolved = portage.key_expand(
7962                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7963                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7964                                         addl += " " + colorize(blocker_style, resolved)
7965                                 else:
7966                                         addl = "[%s %s] %s%s" % \
7967                                                 (colorize(blocker_style, "blocks"),
7968                                                 addl, indent, colorize(blocker_style, resolved))
7969                                 block_parents = self._blocker_parents.parent_nodes(x)
7970                                 block_parents = set([pnode[2] for pnode in block_parents])
7971                                 block_parents = ", ".join(block_parents)
7972                                 if resolved!=x[2]:
7973                                         addl += colorize(blocker_style,
7974                                                 " (\"%s\" is blocking %s)") % \
7975                                                 (str(x.atom).lstrip("!"), block_parents)
7976                                 else:
7977                                         addl += colorize(blocker_style,
7978                                                 " (is blocking %s)") % block_parents
7979                                 if isinstance(x, Blocker) and x.satisfied:
7980                                         if columns:
7981                                                 continue
7982                                         p.append(addl)
7983                                 else:
7984                                         blockers.append(addl)
7985                         else:
7986                                 pkg_status = x[3]
7987                                 pkg_merge = ordered and pkg_status == "merge"
7988                                 if not pkg_merge and pkg_status == "merge":
7989                                         pkg_status = "nomerge"
7990                                 built = pkg_type != "ebuild"
7991                                 installed = pkg_type == "installed"
7992                                 pkg = x
7993                                 metadata = pkg.metadata
7994                                 ebuild_path = None
7995                                 repo_name = metadata["repository"]
7996                                 if pkg_type == "ebuild":
7997                                         ebuild_path = portdb.findname(pkg_key)
7998                                         if not ebuild_path: # shouldn't happen
7999                                                 raise portage.exception.PackageNotFound(pkg_key)
8000                                         repo_path_real = os.path.dirname(os.path.dirname(
8001                                                 os.path.dirname(ebuild_path)))
8002                                 else:
8003                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8004                                 pkg_use = list(pkg.use.enabled)
8005                                 try:
8006                                         restrict = flatten(use_reduce(paren_reduce(
8007                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8008                                 except portage.exception.InvalidDependString, e:
8009                                         if not pkg.installed:
8010                                                 show_invalid_depstring_notice(x,
8011                                                         pkg.metadata["RESTRICT"], str(e))
8012                                                 del e
8013                                                 return 1
8014                                         restrict = []
8015                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8016                                         "fetch" in restrict:
8017                                         fetch = red("F")
8018                                         if ordered:
8019                                                 counters.restrict_fetch += 1
8020                                         if portdb.fetch_check(pkg_key, pkg_use):
8021                                                 fetch = green("f")
8022                                                 if ordered:
8023                                                         counters.restrict_fetch_satisfied += 1
8024
8025                                 # We need to use "--emptytree" testing here rather than "empty" param testing because the "empty"
8026                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
8027                                 myoldbest = []
8028                                 myinslotlist = None
8029                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8030                                 if vardb.cpv_exists(pkg_key):
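                                             # This exact version is already installed, so this entry is
                                             # either a reinstall (R) or a scheduled uninstall.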
8031                                         addl="  "+yellow("R")+fetch+"  "
8032                                         if ordered:
8033                                                 if pkg_merge:
8034                                                         counters.reinst += 1
8035                                                 elif pkg_status == "uninstall":
8036                                                         counters.uninst += 1
8037                                 # filter out old-style virtual matches
8038                                 elif installed_versions and \
8039                                         portage.cpv_getkey(installed_versions[0]) == \
8040                                         portage.cpv_getkey(pkg_key):
8041                                         myinslotlist = vardb.match(pkg.slot_atom)
8042                                         # If this is the first install of a new-style virtual, we
8043                                         # need to filter out old-style virtual matches.
8044                                         if myinslotlist and \
8045                                                 portage.cpv_getkey(myinslotlist[0]) != \
8046                                                 portage.cpv_getkey(pkg_key):
8047                                                 myinslotlist = None
8048                                         if myinslotlist:
8049                                                 myoldbest = myinslotlist[:]
8050                                                 addl = "   " + fetch
8051                                                 if not portage.dep.cpvequal(pkg_key,
8052                                                         portage.best([pkg_key] + myoldbest)):
8053                                                         # Downgrade in slot
8054                                                         addl += turquoise("U")+blue("D")
8055                                                         if ordered:
8056                                                                 counters.downgrades += 1
8057                                                 else:
8058                                                         # Update in slot
8059                                                         addl += turquoise("U") + " "
8060                                                         if ordered:
8061                                                                 counters.upgrades += 1
8062                                         else:
8063                                                 # New slot, mark it new.
8064                                                 addl = " " + green("NS") + fetch + "  "
8065                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8066                                                 if ordered:
8067                                                         counters.newslot += 1
8068
8069                                         if "--changelog" in self.myopts:
8070                                                 inst_matches = vardb.match(pkg.slot_atom)
8071                                                 if inst_matches:
8072                                                         changelogs.extend(self.calc_changelog(
8073                                                                 portdb.findname(pkg_key),
8074                                                                 inst_matches[0], pkg_key))
8075                                 else:
8076                                         addl = " " + green("N") + " " + fetch + "  "
8077                                         if ordered:
8078                                                 counters.new += 1
8079
8080                                 verboseadd = ""
8081                                 repoadd = None
8082
8083                                 if True:
8084                                         # USE flag display
8085                                         forced_flags = set()
8086                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8087                                         forced_flags.update(pkgsettings.useforce)
8088                                         forced_flags.update(pkgsettings.usemask)
8089
8090                                         cur_use = [flag for flag in pkg.use.enabled \
8091                                                 if flag in pkg.iuse.all]
8092                                         cur_iuse = sorted(pkg.iuse.all)
8093
8094                                         if myoldbest and myinslotlist:
8095                                                 previous_cpv = myoldbest[0]
8096                                         else:
8097                                                 previous_cpv = pkg.cpv
8098                                         if vardb.cpv_exists(previous_cpv):
8099                                                 old_iuse, old_use = vardb.aux_get(
8100                                                                 previous_cpv, ["IUSE", "USE"])
8101                                                 old_iuse = list(set(
8102                                                         filter_iuse_defaults(old_iuse.split())))
8103                                                 old_iuse.sort()
8104                                                 old_use = old_use.split()
8105                                                 is_new = False
8106                                         else:
8107                                                 old_iuse = []
8108                                                 old_use = []
8109                                                 is_new = True
8110
8111                                         old_use = [flag for flag in old_use if flag in old_iuse]
8112
8113                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8114                                         use_expand.sort()
8115                                         use_expand.reverse()
8116                                         use_expand_hidden = \
8117                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8118
8119                                         def map_to_use_expand(myvals, forcedFlags=False,
8120                                                 removeHidden=True):
8121                                                 ret = {}
8122                                                 forced = {}
8123                                                 for exp in use_expand:
8124                                                         ret[exp] = []
8125                                                         forced[exp] = set()
8126                                                         for val in myvals[:]:
8127                                                                 if val.startswith(exp.lower()+"_"):
8128                                                                         if val in forced_flags:
8129                                                                                 forced[exp].add(val[len(exp)+1:])
8130                                                                         ret[exp].append(val[len(exp)+1:])
8131                                                                         myvals.remove(val)
8132                                                 ret["USE"] = myvals
8133                                                 forced["USE"] = [val for val in myvals \
8134                                                         if val in forced_flags]
8135                                                 if removeHidden:
8136                                                         for exp in use_expand_hidden:
8137                                                                 ret.pop(exp, None)
8138                                                 if forcedFlags:
8139                                                         return ret, forced
8140                                                 return ret
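                                        # Illustration with hypothetical flags: if use_expand contains
                                        # "video_cards", map_to_use_expand(["video_cards_radeon", "ssl"])
                                        # returns {"video_cards": ["radeon"], "USE": ["ssl"]} (plus a
                                        # second dict of forced flags when forcedFlags=True).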
8141
8142                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8143                                         # are the only thing that triggered reinstallation.
8144                                         reinst_flags_map = {}
8145                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8146                                         reinst_expand_map = None
8147                                         if reinstall_for_flags:
8148                                                 reinst_flags_map = map_to_use_expand(
8149                                                         list(reinstall_for_flags), removeHidden=False)
8150                                                 for k in list(reinst_flags_map):
8151                                                         if not reinst_flags_map[k]:
8152                                                                 del reinst_flags_map[k]
8153                                                 if not reinst_flags_map.get("USE"):
8154                                                         reinst_expand_map = reinst_flags_map.copy()
8155                                                         reinst_expand_map.pop("USE", None)
8156                                         if reinst_expand_map and \
8157                                                 not set(reinst_expand_map).difference(
8158                                                 use_expand_hidden):
8159                                                 use_expand_hidden = \
8160                                                         set(use_expand_hidden).difference(
8161                                                         reinst_expand_map)
8162
8163                                         cur_iuse_map, iuse_forced = \
8164                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8165                                         cur_use_map = map_to_use_expand(cur_use)
8166                                         old_iuse_map = map_to_use_expand(old_iuse)
8167                                         old_use_map = map_to_use_expand(old_use)
8168
8169                                         use_expand.sort()
8170                                         use_expand.insert(0, "USE")
8171                                         
8172                                         for key in use_expand:
8173                                                 if key in use_expand_hidden:
8174                                                         continue
8175                                                 verboseadd += create_use_string(key.upper(),
8176                                                         cur_iuse_map[key], iuse_forced[key],
8177                                                         cur_use_map[key], old_iuse_map[key],
8178                                                         old_use_map[key], is_new,
8179                                                         reinst_flags_map.get(key))
8180
8181                                 if verbosity == 3:
8182                                         # size verbose
8183                                         mysize=0
8184                                         if pkg_type == "ebuild" and pkg_merge:
8185                                                 try:
8186                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8187                                                                 useflags=pkg_use, debug=self.edebug)
8188                                                 except portage.exception.InvalidDependString, e:
8189                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8190                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8191                                                         del e
8192                                                         return 1
8193                                                 if myfilesdict is None:
8194                                                         myfilesdict="[empty/missing/bad digest]"
8195                                                 else:
8196                                                         for myfetchfile in myfilesdict:
8197                                                                 if myfetchfile not in myfetchlist:
8198                                                                         mysize+=myfilesdict[myfetchfile]
8199                                                                         myfetchlist.append(myfetchfile)
8200                                                         if ordered:
8201                                                                 counters.totalsize += mysize
8202                                                 verboseadd += format_size(mysize)
8203
8204                                         # overlay verbose
8205                                         # assign index for a previous version in the same slot
8206                                         has_previous = False
8207                                         repo_name_prev = None
8208                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8209                                                 metadata["SLOT"])
8210                                         slot_matches = vardb.match(slot_atom)
8211                                         if slot_matches:
8212                                                 has_previous = True
8213                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8214                                                         ["repository"])[0]
8215
8216                                         # now use the data to generate output
8217                                         if pkg.installed or not has_previous:
8218                                                 repoadd = repo_display.repoStr(repo_path_real)
8219                                         else:
8220                                                 repo_path_prev = None
8221                                                 if repo_name_prev:
8222                                                         repo_path_prev = portdb.getRepositoryPath(
8223                                                                 repo_name_prev)
8224                                                 if repo_path_prev == repo_path_real:
8225                                                         repoadd = repo_display.repoStr(repo_path_real)
8226                                                 else:
8227                                                         repoadd = "%s=>%s" % (
8228                                                                 repo_display.repoStr(repo_path_prev),
8229                                                                 repo_display.repoStr(repo_path_real))
8230                                         if repoadd:
8231                                                 repoadd_set.add(repoadd)
8232
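                                # Build [cp, version, revision] for display; the implicit "-r0"
                                # revision is suppressed below.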
8233                                 xs = [portage.cpv_getkey(pkg_key)] + \
8234                                         list(portage.catpkgsplit(pkg_key)[2:])
8235                                 if xs[2] == "r0":
8236                                         xs[2] = ""
8237                                 else:
8238                                         xs[2] = "-" + xs[2]
8239
8240                                 mywidth = 130
8241                                 if "COLUMNWIDTH" in self.settings:
8242                                         try:
8243                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8244                                         except ValueError, e:
8245                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8246                                                 portage.writemsg(
8247                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8248                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8249                                                 del e
8250                                 oldlp = mywidth - 30
8251                                 newlp = oldlp - 30
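                                # newlp and oldlp are the column offsets used below to pad the
                                # new-version and previous-version fields in --columns output.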
8252
8253                                 # Convert myoldbest from a list to a string.
8254                                 if not myoldbest:
8255                                         myoldbest = ""
8256                                 else:
8257                                         for pos, key in enumerate(myoldbest):
8258                                                 key = portage.catpkgsplit(key)[2] + \
8259                                                         "-" + portage.catpkgsplit(key)[3]
8260                                                 if key[-3:] == "-r0":
8261                                                         key = key[:-3]
8262                                                 myoldbest[pos] = key
8263                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8264
8265                                 pkg_cp = xs[0]
8266                                 root_config = self.roots[myroot]
8267                                 system_set = root_config.sets["system"]
8268                                 world_set  = root_config.sets["world"]
8269
8270                                 pkg_system = False
8271                                 pkg_world = False
8272                                 try:
8273                                         pkg_system = system_set.findAtomForPackage(pkg)
8274                                         pkg_world  = world_set.findAtomForPackage(pkg)
8275                                         if not (oneshot or pkg_world) and \
8276                                                 myroot == self.target_root and \
8277                                                 favorites_set.findAtomForPackage(pkg):
8278                                                 # Maybe it will be added to world now.
8279                                                 if create_world_atom(pkg, favorites_set, root_config):
8280                                                         pkg_world = True
8281                                 except portage.exception.InvalidDependString:
8282                                         # This is reported elsewhere if relevant.
8283                                         pass
8284
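                                # pkgprint() colorizes a package string according to pkg_merge,
                                # pkg_status, and the system/world membership computed above.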
8285                                 def pkgprint(pkg_str):
8286                                         if pkg_merge:
8287                                                 if pkg_system:
8288                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8289                                                 elif pkg_world:
8290                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8291                                                 else:
8292                                                         return colorize("PKG_MERGE", pkg_str)
8293                                         elif pkg_status == "uninstall":
8294                                                 return colorize("PKG_UNINSTALL", pkg_str)
8295                                         else:
8296                                                 if pkg_system:
8297                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8298                                                 elif pkg_world:
8299                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8300                                                 else:
8301                                                         return colorize("PKG_NOMERGE", pkg_str)
8302
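                                # Packages with "interactive" in PROPERTIES get a warning-colored
                                # "I" marker and are counted separately.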
8303                                 try:
8304                                         properties = flatten(use_reduce(paren_reduce(
8305                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8306                                 except portage.exception.InvalidDependString, e:
8307                                         if not pkg.installed:
8308                                                 show_invalid_depstring_notice(pkg,
8309                                                         pkg.metadata["PROPERTIES"], str(e))
8310                                                 del e
8311                                                 return 1
8312                                         properties = []
8313                                 interactive = "interactive" in properties
8314                                 if interactive and pkg.operation == "merge":
8315                                         addl = colorize("WARN", "I") + addl[1:]
8316                                         if ordered:
8317                                                 counters.interactive += 1
8318
8319                                 if x[1]!="/":
8320                                         if myoldbest:
8321                                                 myoldbest +=" "
8322                                         if "--columns" in self.myopts:
8323                                                 if "--quiet" in self.myopts:
8324                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8325                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8326                                                         myprint=myprint+myoldbest
8327                                                         myprint=myprint+darkgreen("to "+x[1])
8328                                                         verboseadd = None
8329                                                 else:
8330                                                         if not pkg_merge:
8331                                                                 myprint = "[%s] %s%s" % \
8332                                                                         (pkgprint(pkg_status.ljust(13)),
8333                                                                         indent, pkgprint(pkg.cp))
8334                                                         else:
8335                                                                 myprint = "[%s %s] %s%s" % \
8336                                                                         (pkgprint(pkg.type_name), addl,
8337                                                                         indent, pkgprint(pkg.cp))
8338                                                         if (newlp-nc_len(myprint)) > 0:
8339                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8340                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8341                                                         if (oldlp-nc_len(myprint)) > 0:
8342                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8343                                                         myprint=myprint+myoldbest
8344                                                         myprint += darkgreen("to " + pkg.root)
8345                                         else:
8346                                                 if not pkg_merge:
8347                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8348                                                 else:
8349                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8350                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8351                                                         myoldbest + darkgreen("to " + myroot)
8352                                 else:
8353                                         if "--columns" in self.myopts:
8354                                                 if "--quiet" in self.myopts:
8355                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8356                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8357                                                         myprint=myprint+myoldbest
8358                                                         verboseadd = None
8359                                                 else:
8360                                                         if not pkg_merge:
8361                                                                 myprint = "[%s] %s%s" % \
8362                                                                         (pkgprint(pkg_status.ljust(13)),
8363                                                                         indent, pkgprint(pkg.cp))
8364                                                         else:
8365                                                                 myprint = "[%s %s] %s%s" % \
8366                                                                         (pkgprint(pkg.type_name), addl,
8367                                                                         indent, pkgprint(pkg.cp))
8368                                                         if (newlp-nc_len(myprint)) > 0:
8369                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8370                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8371                                                         if (oldlp-nc_len(myprint)) > 0:
8372                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8373                                                         myprint += myoldbest
8374                                         else:
8375                                                 if not pkg_merge:
8376                                                         myprint = "[%s] %s%s %s" % \
8377                                                                 (pkgprint(pkg_status.ljust(13)),
8378                                                                 indent, pkgprint(pkg.cpv),
8379                                                                 myoldbest)
8380                                                 else:
8381                                                         myprint = "[%s %s] %s%s %s" % \
8382                                                                 (pkgprint(pkg_type), addl, indent,
8383                                                                 pkgprint(pkg.cpv), myoldbest)
8384
8385                                 if columns and pkg.operation == "uninstall":
8386                                         continue
8387                                 p.append((myprint, verboseadd, repoadd))
8388
8389                                 if "--tree" not in self.myopts and \
8390                                         "--quiet" not in self.myopts and \
8391                                         not self._opts_no_restart.intersection(self.myopts) and \
8392                                         pkg.root == self._running_root.root and \
8393                                         portage.match_from_list(
8394                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8395                                         not vardb.cpv_exists(pkg.cpv) and \
8396                                         "--quiet" not in self.myopts:
8397                                                 if mylist_index < len(mylist) - 1:
8398                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8399                                                         p.append(colorize("WARN", "    then resume the merge."))
8400
8401                 out = sys.stdout
8402                 show_repos = repoadd_set and repoadd_set != set(["0"])
8403
8404                 for x in p:
8405                         if isinstance(x, basestring):
8406                                 out.write("%s\n" % (x,))
8407                                 continue
8408
8409                         myprint, verboseadd, repoadd = x
8410
8411                         if verboseadd:
8412                                 myprint += " " + verboseadd
8413
8414                         if show_repos and repoadd:
8415                                 myprint += " " + teal("[%s]" % repoadd)
8416
8417                         out.write("%s\n" % (myprint,))
8418
8419                 for x in blockers:
8420                         print x
8421
8422                 if verbosity == 3:
8423                         print
8424                         print counters
8425                         if show_repos:
8426                                 sys.stdout.write(str(repo_display))
8427
8428                 if "--changelog" in self.myopts:
8429                         print
8430                         for revision,text in changelogs:
8431                                 print bold('*'+revision)
8432                                 sys.stdout.write(text)
8433
8434                 sys.stdout.flush()
8435                 return os.EX_OK
8436
8437         def display_problems(self):
8438                 """
8439                 Display problems with the dependency graph such as slot collisions.
8440                 This is called internally by display() to show the problems _after_
8441                 the merge list where it is most likely to be seen, but if display()
8442                 is not going to be called then this method should be called explicitly
8443                 to ensure that the user is notified of problems with the graph.
8444
8445                 All output goes to stderr, except for unsatisfied dependencies which
8446                 go to stdout for parsing by programs such as autounmask.
8447                 """
8448
8449                 # Note that show_masked_packages() sends its output to
8450                 # stdout, and some programs such as autounmask parse the
8451                 # output in cases when emerge bails out. However, when
8452                 # show_masked_packages() is called for installed packages
8453                 # here, the message is a warning that is more appropriate
8454                 # to send to stderr, so temporarily redirect stdout to
8455                 # stderr. TODO: Fix output code so there's a cleaner way
8456                 # to redirect everything to stderr.
8457                 sys.stdout.flush()
8458                 sys.stderr.flush()
8459                 stdout = sys.stdout
8460                 try:
8461                         sys.stdout = sys.stderr
8462                         self._display_problems()
8463                 finally:
8464                         sys.stdout = stdout
8465                         sys.stdout.flush()
8466                         sys.stderr.flush()
8467
8468                 # This goes to stdout for parsing by programs like autounmask.
8469                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8470                         self._show_unsatisfied_dep(*pargs, **kwargs)
8471
8472         def _display_problems(self):
8473                 if self._circular_deps_for_display is not None:
8474                         self._show_circular_deps(
8475                                 self._circular_deps_for_display)
8476
8477                 # The user is only notified of a slot conflict if
8478                 # there are no unresolvable blocker conflicts.
8479                 if self._unsatisfied_blockers_for_display is not None:
8480                         self._show_unsatisfied_blockers(
8481                                 self._unsatisfied_blockers_for_display)
8482                 else:
8483                         self._show_slot_collision_notice()
8484
8485                 # TODO: Add generic support for "set problem" handlers so that
8486                 # the below warnings aren't special cases for world only.
8487
8488                 if self._missing_args:
8489                         world_problems = False
8490                         if "world" in self._sets:
8491                                 # Filter out indirect members of world (from nested sets)
8492                                 # since only direct members of world are desired here.
8493                                 world_set = self.roots[self.target_root].sets["world"]
8494                                 for arg, atom in self._missing_args:
8495                                         if arg.name == "world" and atom in world_set:
8496                                                 world_problems = True
8497                                                 break
8498
8499                         if world_problems:
8500                                 sys.stderr.write("\n!!! Problems have been " + \
8501                                         "detected with your world file\n")
8502                                 sys.stderr.write("!!! Please run " + \
8503                                         green("emaint --check world")+"\n\n")
8504
8505                 if self._missing_args:
8506                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8507                                 " Ebuilds for the following packages are either all\n")
8508                         sys.stderr.write(colorize("BAD", "!!!") + \
8509                                 " masked or don't exist:\n")
8510                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8511                                 self._missing_args) + "\n")
8512
8513                 if self._pprovided_args:
8514                         arg_refs = {}
8515                         for arg, atom in self._pprovided_args:
8516                                 if isinstance(arg, SetArg):
8517                                         parent = arg.name
8518                                         arg_atom = (atom, atom)
8519                                 else:
8520                                         parent = "args"
8521                                         arg_atom = (arg.arg, atom)
8522                                 refs = arg_refs.setdefault(arg_atom, [])
8523                                 if parent not in refs:
8524                                         refs.append(parent)
8525                         msg = []
8526                         msg.append(bad("\nWARNING: "))
8527                         if len(self._pprovided_args) > 1:
8528                                 msg.append("Requested packages will not be " + \
8529                                         "merged because they are listed in\n")
8530                         else:
8531                                 msg.append("A requested package will not be " + \
8532                                         "merged because it is listed in\n")
8533                         msg.append("package.provided:\n\n")
8534                         problems_sets = set()
8535                         for (arg, atom), refs in arg_refs.iteritems():
8536                                 ref_string = ""
8537                                 if refs:
8538                                         problems_sets.update(refs)
8539                                         refs.sort()
8540                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8541                                         ref_string = " pulled in by " + ref_string
8542                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8543                         msg.append("\n")
8544                         if "world" in problems_sets:
8545                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8546                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8547                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8548                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8549                                 msg.append("The best course of action depends on the reason that an offending\n")
8550                                 msg.append("package.provided entry exists.\n\n")
8551                         sys.stderr.write("".join(msg))
8552
8553                 masked_packages = []
8554                 for pkg in self._masked_installed:
8555                         root_config = pkg.root_config
8556                         pkgsettings = self.pkgsettings[pkg.root]
8557                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8558                         masked_packages.append((root_config, pkgsettings,
8559                                 pkg.cpv, pkg.metadata, mreasons))
8560                 if masked_packages:
8561                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8562                                 " The following installed packages are masked:\n")
8563                         show_masked_packages(masked_packages)
8564                         show_mask_docs()
8565                         print
8566
8567         def calc_changelog(self,ebuildpath,current,next):
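                """
                Return the ChangeLog entries between the installed version
                (current) and the version about to be merged (next), as a
                list of (version, text) tuples. Returns an empty list if
                the ChangeLog is missing or current cannot be found in it.
                """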
8568                 if ebuildpath is None or not os.path.exists(ebuildpath):
8569                         return []
8570                 current = '-'.join(portage.catpkgsplit(current)[1:])
8571                 if current.endswith('-r0'):
8572                         current = current[:-3]
8573                 next = '-'.join(portage.catpkgsplit(next)[1:])
8574                 if next.endswith('-r0'):
8575                         next = next[:-3]
8576                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8577                 try:
8578                         changelog = open(changelogpath).read()
8579                 except SystemExit, e:
8580                         raise # re-raise so SystemExit can still terminate the process
8581                 except:
8582                         return []
8583                 divisions = self.find_changelog_tags(changelog)
8584                 #print 'XX from',current,'to',next
8585                 #for div,text in divisions: print 'XX',div
8586                 # skip entries for all revisions above the one we are about to emerge
8587                 for i in range(len(divisions)):
8588                         if divisions[i][0]==next:
8589                                 divisions = divisions[i:]
8590                                 break
8591                 # find out how many entries we are going to display
8592                 for i in range(len(divisions)):
8593                         if divisions[i][0]==current:
8594                                 divisions = divisions[:i]
8595                                 break
8596                 else:
8597                         # couldn't find the current revision in the list; display nothing
8598                         return []
8599                 return divisions
8600
8601         def find_changelog_tags(self,changelog):
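                """
                Split a ChangeLog into (version, text) tuples, where each
                version comes from a "*<pkg>-<version>" header line and text
                is everything up to the next header. Trailing ".ebuild" and
                "-r0" suffixes are stripped from the version.
                """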
8602                 divs = []
8603                 release = None
8604                 while 1:
8605                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8606                         if match is None:
8607                                 if release is not None:
8608                                         divs.append((release,changelog))
8609                                 return divs
8610                         if release is not None:
8611                                 divs.append((release,changelog[:match.start()]))
8612                         changelog = changelog[match.end():]
8613                         release = match.group(1)
8614                         if release.endswith('.ebuild'):
8615                                 release = release[:-7]
8616                         if release.endswith('-r0'):
8617                                 release = release[:-3]
8618
8619         def saveNomergeFavorites(self):
8620                 """Find atoms in favorites that are not in the mergelist and add them
8621                 to the world file if necessary."""
8622                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8623                         "--oneshot", "--onlydeps", "--pretend"):
8624                         if x in self.myopts:
8625                                 return
8626                 root_config = self.roots[self.target_root]
8627                 world_set = root_config.sets["world"]
8628
8629                 world_locked = False
8630                 if hasattr(world_set, "lock"):
8631                         world_set.lock()
8632                         world_locked = True
8633
8634                 if hasattr(world_set, "load"):
8635                         world_set.load() # maybe it's changed on disk
8636
8637                 args_set = self._sets["args"]
8638                 portdb = self.trees[self.target_root]["porttree"].dbapi
8639                 added_favorites = set()
8640                 for x in self._set_nodes:
8641                         pkg_type, root, pkg_key, pkg_status = x
8642                         if pkg_status != "nomerge":
8643                                 continue
8644
8645                         try:
8646                                 myfavkey = create_world_atom(x, args_set, root_config)
8647                                 if myfavkey:
8648                                         if myfavkey in added_favorites:
8649                                                 continue
8650                                         added_favorites.add(myfavkey)
8651                         except portage.exception.InvalidDependString, e:
8652                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8653                                         (pkg_key, str(e)), noiselevel=-1)
8654                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8655                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8656                                 del e
8657                 all_added = []
8658                 for k in self._sets:
8659                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8660                                 continue
8661                         s = SETPREFIX + k
8662                         if s in world_set:
8663                                 continue
8664                         all_added.append(SETPREFIX + k)
8665                 all_added.extend(added_favorites)
8666                 all_added.sort()
8667                 for a in all_added:
8668                         print ">>> Recording %s in \"world\" favorites file..." % \
8669                                 colorize("INFORM", str(a))
8670                 if all_added:
8671                         world_set.update(all_added)
8672
8673                 if world_locked:
8674                         world_set.unlock()
8675
8676         def loadResumeCommand(self, resume_data, skip_masked=False):
8677                 """
8678                 Add a resume command to the graph and validate it in the process.  This
8679                 will raise a PackageNotFound exception if a package is not available.
8680                 """
8681
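                # resume_data comes from a previous emerge invocation; each
                # "mergelist" entry is a [pkg_type, root, cpv, action] list
                # (see the loop below), and "favorites" is an optional list
                # of atoms and set names.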
8682                 if not isinstance(resume_data, dict):
8683                         return False
8684
8685                 mergelist = resume_data.get("mergelist")
8686                 if not isinstance(mergelist, list):
8687                         mergelist = []
8688
8689                 fakedb = self.mydbapi
8690                 trees = self.trees
8691                 serialized_tasks = []
8692                 masked_tasks = []
8693                 for x in mergelist:
8694                         if not (isinstance(x, list) and len(x) == 4):
8695                                 continue
8696                         pkg_type, myroot, pkg_key, action = x
8697                         if pkg_type not in self.pkg_tree_map:
8698                                 continue
8699                         if action != "merge":
8700                                 continue
8701                         tree_type = self.pkg_tree_map[pkg_type]
8702                         mydb = trees[myroot][tree_type].dbapi
8703                         db_keys = list(self._trees_orig[myroot][
8704                                 tree_type].dbapi._aux_cache_keys)
8705                         try:
8706                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8707                         except KeyError:
8708                                 # It does not exist or it is corrupt.
8709                                 if action == "uninstall":
8710                                         continue
8711                                 raise portage.exception.PackageNotFound(pkg_key)
8712                         installed = action == "uninstall"
8713                         built = pkg_type != "ebuild"
8714                         root_config = self.roots[myroot]
8715                         pkg = Package(built=built, cpv=pkg_key,
8716                                 installed=installed, metadata=metadata,
8717                                 operation=action, root_config=root_config,
8718                                 type_name=pkg_type)
8719                         if pkg_type == "ebuild":
8720                                 pkgsettings = self.pkgsettings[myroot]
8721                                 pkgsettings.setcpv(pkg)
8722                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8723                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8724                         self._pkg_cache[pkg] = pkg
8725
8726                         root_config = self.roots[pkg.root]
8727                         if "merge" == pkg.operation and \
8728                                 not visible(root_config.settings, pkg):
8729                                 if skip_masked:
8730                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8731                                 else:
8732                                         self._unsatisfied_deps_for_display.append(
8733                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8734
8735                         fakedb[myroot].cpv_inject(pkg)
8736                         serialized_tasks.append(pkg)
8737                         self.spinner.update()
8738
8739                 if self._unsatisfied_deps_for_display:
8740                         return False
8741
8742                 if not serialized_tasks or "--nodeps" in self.myopts:
8743                         self._serialized_tasks_cache = serialized_tasks
8744                         self._scheduler_graph = self.digraph
8745                 else:
8746                         self._select_package = self._select_pkg_from_graph
8747                         self.myparams.add("selective")
8748                         # Always traverse deep dependencies in order to account for
8749                         # potentially unsatisfied dependencies of installed packages.
8750                         # This is necessary for correct --keep-going or --resume operation
8751                         # in case a package from a group of circularly dependent packages
8752                         # fails. In this case, a package which has recently been installed
8753                         # may have an unsatisfied circular dependency (pulled in by
8754                         # PDEPEND, for example). So, even though a package is already
8755                         # installed, it may not have all of its dependencies satisfied, so
8756                         # it may not be usable. If such a package is in the subgraph of
8757                         # deep dependencies of a scheduled build, that build needs to
8758                         # be cancelled. In order for this type of situation to be
8759                         # recognized, deep traversal of dependencies is required.
8760                         self.myparams.add("deep")
8761
8762                         favorites = resume_data.get("favorites")
8763                         args_set = self._sets["args"]
8764                         if isinstance(favorites, list):
8765                                 args = self._load_favorites(favorites)
8766                         else:
8767                                 args = []
8768
8769                         for task in serialized_tasks:
8770                                 if isinstance(task, Package) and \
8771                                         task.operation == "merge":
8772                                         if not self._add_pkg(task, None):
8773                                                 return False
8774
8775                         # Packages for argument atoms need to be explicitly
8776                         # added via _add_pkg() so that they are included in the
8777                         # digraph (needed at least for --tree display).
8778                         for arg in args:
8779                                 for atom in arg.set:
8780                                         pkg, existing_node = self._select_package(
8781                                                 arg.root_config.root, atom)
8782                                         if existing_node is None and \
8783                                                 pkg is not None:
8784                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8785                                                         root=pkg.root, parent=arg)):
8786                                                         return False
8787
8788                         # Allow unsatisfied deps here to avoid showing a masking
8789                         # message for an unsatisfied dep that isn't necessarily
8790                         # masked.
8791                         if not self._create_graph(allow_unsatisfied=True):
8792                                 return False
8793
8794                         unsatisfied_deps = []
8795                         for dep in self._unsatisfied_deps:
8796                                 if not isinstance(dep.parent, Package):
8797                                         continue
8798                                 if dep.parent.operation == "merge":
8799                                         unsatisfied_deps.append(dep)
8800                                         continue
8801
8802                                 # For unsatisfied deps of installed packages, only account for
8803                                 # them if they are in the subgraph of dependencies of a package
8804                                 # which is scheduled to be installed.
8805                                 unsatisfied_install = False
8806                                 traversed = set()
8807                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8808                                 while dep_stack:
8809                                         node = dep_stack.pop()
8810                                         if not isinstance(node, Package):
8811                                                 continue
8812                                         if node.operation == "merge":
8813                                                 unsatisfied_install = True
8814                                                 break
8815                                         if node in traversed:
8816                                                 continue
8817                                         traversed.add(node)
8818                                         dep_stack.extend(self.digraph.parent_nodes(node))
8819
8820                                 if unsatisfied_install:
8821                                         unsatisfied_deps.append(dep)
8822
8823                         if masked_tasks or unsatisfied_deps:
8824                                 # This probably means that a required package
8825                                 # was dropped via --skipfirst. It makes the
8826                                 # resume list invalid, so convert it to a
8827                                 # UnsatisfiedResumeDep exception.
8828                                 raise self.UnsatisfiedResumeDep(self,
8829                                         masked_tasks + unsatisfied_deps)
8830                         self._serialized_tasks_cache = None
8831                         try:
8832                                 self.altlist()
8833                         except self._unknown_internal_error:
8834                                 return False
8835
8836                 return True
8837
8838         def _load_favorites(self, favorites):
8839                 """
8840                 Use a list of favorites to resume state from a
8841                 previous select_files() call. This creates similar
8842                 DependencyArg instances to those that would have
8843                 been created by the original select_files() call.
8844                 This allows Package instances to be matched with
8845                 DependencyArg instances during graph creation.
8846                 """
8847                 root_config = self.roots[self.target_root]
8848                 getSetAtoms = root_config.setconfig.getSetAtoms
8849                 sets = root_config.sets
8850                 args = []
8851                 for x in favorites:
8852                         if not isinstance(x, basestring):
8853                                 continue
8854                         if x in ("system", "world"):
8855                                 x = SETPREFIX + x
8856                         if x.startswith(SETPREFIX):
8857                                 s = x[len(SETPREFIX):]
8858                                 if s not in sets:
8859                                         continue
8860                                 if s in self._sets:
8861                                         continue
8862                                 # Recursively expand sets so that containment tests in
8863                                 # self._get_parent_sets() properly match atoms in nested
8864                                 # sets (like if world contains system).
8865                                 expanded_set = InternalPackageSet(
8866                                         initial_atoms=getSetAtoms(s))
8867                                 self._sets[s] = expanded_set
8868                                 args.append(SetArg(arg=x, set=expanded_set,
8869                                         root_config=root_config))
8870                         else:
8871                                 if not portage.isvalidatom(x):
8872                                         continue
8873                                 args.append(AtomArg(arg=x, atom=x,
8874                                         root_config=root_config))
8875
8876                 self._set_args(args)
8877                 return args
8878
8879         class UnsatisfiedResumeDep(portage.exception.PortageException):
8880                 """
8881                 A dependency of a resume list is not installed. This
8882                 can occur when a required package is dropped from the
8883                 merge list via --skipfirst.
8884                 """
8885                 def __init__(self, depgraph, value):
8886                         portage.exception.PortageException.__init__(self, value)
8887                         self.depgraph = depgraph
8888
8889         class _internal_exception(portage.exception.PortageException):
8890                 def __init__(self, value=""):
8891                         portage.exception.PortageException.__init__(self, value)
8892
8893         class _unknown_internal_error(_internal_exception):
8894                 """
8895                 Used by the depgraph internally to terminate graph creation.
8896                 The specific reason for the failure should have been dumped
8897                 to stderr; unfortunately, the exact reason for the failure
8898                 may not be known.
8899                 """
8900
8901         class _serialize_tasks_retry(_internal_exception):
8902                 """
8903                 This is raised by the _serialize_tasks() method when it needs to
8904                 be called again for some reason. The only case that it's currently
8905                 used for is when neglected dependencies need to be added to the
8906                 graph in order to avoid making a potentially unsafe decision.
8907                 """
8908
8909         class _dep_check_composite_db(portage.dbapi):
8910                 """
8911                 A dbapi-like interface that is optimized for use in dep_check() calls.
8912                 This is built on top of the existing depgraph package selection logic.
8913                 Some packages that have been added to the graph may be masked from this
8914                 view in order to influence the atom preference selection that occurs
8915                 via dep_check().
8916                 """
8917                 def __init__(self, depgraph, root):
8918                         portage.dbapi.__init__(self)
8919                         self._depgraph = depgraph
8920                         self._root = root
8921                         self._match_cache = {}
8922                         self._cpv_pkg_map = {}
8923
8924                 def _clear_cache(self):
8925                         self._match_cache.clear()
8926                         self._cpv_pkg_map.clear()
8927
8928                 def match(self, atom):
8929                         ret = self._match_cache.get(atom)
8930                         if ret is not None:
8931                                 return ret[:]
8932                         orig_atom = atom
8933                         if "/" not in atom:
8934                                 atom = self._dep_expand(atom)
8935                         pkg, existing = self._depgraph._select_package(self._root, atom)
8936                         if not pkg:
8937                                 ret = []
8938                         else:
8939                                 # Return the highest available from select_package() as well as
8940                                 # any matching slots in the graph db.
8941                                 slots = set()
8942                                 slots.add(pkg.metadata["SLOT"])
8943                                 atom_cp = portage.dep_getkey(atom)
8944                                 if pkg.cp.startswith("virtual/"):
8945                                         # For new-style virtual lookahead that occurs inside
8946                                         # dep_check(), examine all slots. This is needed
8947                                         # so that newer slots will not unnecessarily be pulled in
8948                                         # when a satisfying lower slot is already installed. For
8949                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8950                                         # there's no need to pull in a newer slot to satisfy a
8951                                         # virtual/jdk dependency.
8952                                         for db, pkg_type, built, installed, db_keys in \
8953                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8954                                                 for cpv in db.match(atom):
8955                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8956                                                                 continue
8957                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8958                                 ret = []
8959                                 if self._visible(pkg):
8960                                         self._cpv_pkg_map[pkg.cpv] = pkg
8961                                         ret.append(pkg.cpv)
8962                                 slots.remove(pkg.metadata["SLOT"])
8963                                 while slots:
8964                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8965                                         pkg, existing = self._depgraph._select_package(
8966                                                 self._root, slot_atom)
8967                                         if not pkg:
8968                                                 continue
8969                                         if not self._visible(pkg):
8970                                                 continue
8971                                         self._cpv_pkg_map[pkg.cpv] = pkg
8972                                         ret.append(pkg.cpv)
8973                                 if ret:
8974                                         self._cpv_sort_ascending(ret)
8975                         self._match_cache[orig_atom] = ret
8976                         return ret[:]
8977
8978                 def _visible(self, pkg):
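                        """
                        Return False for packages that should be masked from this
                        view (see the class docstring): installed packages that are
                        no longer visible or that are matched by an argument atom
                        while "selective" mode is off, and packages that would
                        trigger a slot conflict with a previously selected package.
                        """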
8979                         if pkg.installed and "selective" not in self._depgraph.myparams:
8980                                 try:
8981                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8982                                 except (StopIteration, portage.exception.InvalidDependString):
8983                                         arg = None
8984                                 if arg:
8985                                         return False
8986                         if pkg.installed:
8987                                 try:
8988                                         if not visible(
8989                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8990                                                 return False
8991                                 except portage.exception.InvalidDependString:
8992                                         pass
8993                         in_graph = self._depgraph._slot_pkg_map[
8994                                 self._root].get(pkg.slot_atom)
8995                         if in_graph is None:
8996                                 # Mask choices for packages which are not the highest visible
8997                                 # version within their slot (since they usually trigger slot
8998                                 # conflicts).
8999                                 highest_visible, in_graph = self._depgraph._select_package(
9000                                         self._root, pkg.slot_atom)
9001                                 if pkg != highest_visible:
9002                                         return False
9003                         elif in_graph != pkg:
9004                                 # Mask choices for packages that would trigger a slot
9005                                 # conflict with a previously selected package.
9006                                 return False
9007                         return True
9008
9009                 def _dep_expand(self, atom):
9010                         """
9011                         This is only needed for old installed packages that may
9012                         contain atoms that are not fully qualified with a specific
9013                         category. Emulate the cpv_expand() function that's used by
9014                         dbapi.match() in cases like this. If there are multiple
9015                         matches, it's often due to a new-style virtual that has
9016                         been added, so try to filter those out to avoid raising
9017                         a ValueError.
9018                         """
9019                         root_config = self._depgraph.roots[self._root]
9020                         orig_atom = atom
9021                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9022                         if len(expanded_atoms) > 1:
9023                                 non_virtual_atoms = []
9024                                 for x in expanded_atoms:
9025                                         if not portage.dep_getkey(x).startswith("virtual/"):
9026                                                 non_virtual_atoms.append(x)
9027                                 if len(non_virtual_atoms) == 1:
9028                                         expanded_atoms = non_virtual_atoms
9029                         if len(expanded_atoms) > 1:
9030                                 # compatible with portage.cpv_expand()
9031                                 raise portage.exception.AmbiguousPackageName(
9032                                         [portage.dep_getkey(x) for x in expanded_atoms])
9033                         if expanded_atoms:
9034                                 atom = expanded_atoms[0]
9035                         else:
9036                                 null_atom = insert_category_into_atom(atom, "null")
9037                                 null_cp = portage.dep_getkey(null_atom)
9038                                 cat, atom_pn = portage.catsplit(null_cp)
9039                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9040                                 if virts_p:
9041                                         # Allow the resolver to choose which virtual.
9042                                         atom = insert_category_into_atom(atom, "virtual")
9043                                 else:
9044                                         atom = insert_category_into_atom(atom, "null")
9045                         return atom
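                # Illustrative example (hypothetical package names): an old
                # installed package may record an unqualified atom such as
                # ">=bash-3.2"; _dep_expand() returns the fully qualified
                # ">=app-shells/bash-3.2" so that dbapi.match() can use it.
                # Names with no match fall back to the "null" category, and
                # names served only by virtuals get the "virtual" category so
                # the resolver can choose among the providers.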
9046
9047                 def aux_get(self, cpv, wants):
9048                         metadata = self._cpv_pkg_map[cpv].metadata
9049                         return [metadata.get(x, "") for x in wants]
9050
9051 class RepoDisplay(object):
9052         def __init__(self, roots):
9053                 self._shown_repos = {}
9054                 self._unknown_repo = False
9055                 repo_paths = set()
9056                 for root_config in roots.itervalues():
9057                         portdir = root_config.settings.get("PORTDIR")
9058                         if portdir:
9059                                 repo_paths.add(portdir)
9060                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9061                         if overlays:
9062                                 repo_paths.update(overlays.split())
9063                 repo_paths = list(repo_paths)
9064                 self._repo_paths = repo_paths
9065                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9066                         for repo_path in repo_paths ]
9067
9068                 # pre-allocate index for PORTDIR so that it always has index 0.
9069                 for root_config in roots.itervalues():
9070                         portdb = root_config.trees["porttree"].dbapi
9071                         portdir = portdb.porttree_root
9072                         if portdir:
9073                                 self.repoStr(portdir)
9074
9075         def repoStr(self, repo_path_real):
9076                 real_index = -1
9077                 if repo_path_real in self._repo_paths_real:
9078                         real_index = self._repo_paths_real.index(repo_path_real)
9079                 if real_index == -1:
9080                         s = "?"
9081                         self._unknown_repo = True
9082                 else:
9083                         shown_repos = self._shown_repos
9084                         repo_paths = self._repo_paths
9085                         repo_path = repo_paths[real_index]
9086                         index = shown_repos.get(repo_path)
9087                         if index is None:
9088                                 index = len(shown_repos)
9089                                 shown_repos[repo_path] = index
9090                         s = str(index)
9091                 return s
9092
9093         def __str__(self):
9094                 output = []
9095                 shown_repos = self._shown_repos
9096                 unknown_repo = self._unknown_repo
9097                 if shown_repos or unknown_repo:
9098                         output.append("Portage tree and overlays:\n")
9099                 show_repo_paths = list(shown_repos)
9100                 for repo_path, repo_index in shown_repos.iteritems():
9101                         show_repo_paths[repo_index] = repo_path
9102                 if show_repo_paths:
9103                         for index, repo_path in enumerate(show_repo_paths):
9104                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9105                 if unknown_repo:
9106                         output.append(" "+teal("[?]") + \
9107                                 " indicates that the source repository could not be determined\n")
9108                 return "".join(output)
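# Illustrative output of RepoDisplay.__str__() (paths are hypothetical; the
# "[?]" line appears only when a source repository could not be resolved):
#
#   Portage tree and overlays:
#    [0] /usr/portage
#    [1] /usr/local/overlay
#    [?] indicates that the source repository could not be determined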
9109
9110 class PackageCounters(object):
9111
9112         def __init__(self):
9113                 self.upgrades   = 0
9114                 self.downgrades = 0
9115                 self.new        = 0
9116                 self.newslot    = 0
9117                 self.reinst     = 0
9118                 self.uninst     = 0
9119                 self.blocks     = 0
9120                 self.blocks_satisfied         = 0
9121                 self.totalsize  = 0
9122                 self.restrict_fetch           = 0
9123                 self.restrict_fetch_satisfied = 0
9124                 self.interactive              = 0
9125
9126         def __str__(self):
9127                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9128                 myoutput = []
9129                 details = []
9130                 myoutput.append("Total: %s package" % total_installs)
9131                 if total_installs != 1:
9132                         myoutput.append("s")
9133                 if total_installs != 0:
9134                         myoutput.append(" (")
9135                 if self.upgrades > 0:
9136                         details.append("%s upgrade" % self.upgrades)
9137                         if self.upgrades > 1:
9138                                 details[-1] += "s"
9139                 if self.downgrades > 0:
9140                         details.append("%s downgrade" % self.downgrades)
9141                         if self.downgrades > 1:
9142                                 details[-1] += "s"
9143                 if self.new > 0:
9144                         details.append("%s new" % self.new)
9145                 if self.newslot > 0:
9146                         details.append("%s in new slot" % self.newslot)
9147                         if self.newslot > 1:
9148                                 details[-1] += "s"
9149                 if self.reinst > 0:
9150                         details.append("%s reinstall" % self.reinst)
9151                         if self.reinst > 1:
9152                                 details[-1] += "s"
9153                 if self.uninst > 0:
9154                         details.append("%s uninstall" % self.uninst)
9155                         if self.uninst > 1:
9156                                 details[-1] += "s"
9157                 if self.interactive > 0:
9158                         details.append("%s %s" % (self.interactive,
9159                                 colorize("WARN", "interactive")))
9160                 myoutput.append(", ".join(details))
9161                 if total_installs != 0:
9162                         myoutput.append(")")
9163                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9164                 if self.restrict_fetch:
9165                         myoutput.append("\nFetch Restriction: %s package" % \
9166                                 self.restrict_fetch)
9167                         if self.restrict_fetch > 1:
9168                                 myoutput.append("s")
9169                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9170                         myoutput.append(bad(" (%s unsatisfied)") % \
9171                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9172                 if self.blocks > 0:
9173                         myoutput.append("\nConflict: %s block" % \
9174                                 self.blocks)
9175                         if self.blocks > 1:
9176                                 myoutput.append("s")
9177                         if self.blocks_satisfied < self.blocks:
9178                                 myoutput.append(bad(" (%s unsatisfied)") % \
9179                                         (self.blocks - self.blocks_satisfied))
9180                 return "".join(myoutput)
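# A sketch of the single-line summary produced by PackageCounters.__str__(),
# using hypothetical counts (wrapped here for width; the download size is
# rendered by format_size()):
#
#   Total: 5 packages (2 upgrades, 1 new, 1 in new slot, 1 reinstall),
#   Size of downloads: 1,234 kB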
9181
9182 class PollSelectAdapter(PollConstants):
9183
9184         """
9185         Use select to emulate a poll object, for
9186         systems that don't support poll().
9187         """
9188
9189         def __init__(self):
9190                 self._registered = {}
9191                 self._select_args = [[], [], []]
9192
9193         def register(self, fd, *args):
9194                 """
9195                 Only POLLIN is currently supported!
9196                 """
9197                 if len(args) > 1:
9198                         raise TypeError(
9199                                 "register expected at most 2 arguments, got " + \
9200                                 repr(1 + len(args)))
9201
9202                 eventmask = PollConstants.POLLIN | \
9203                         PollConstants.POLLPRI | PollConstants.POLLOUT
9204                 if args:
9205                         eventmask = args[0]
9206
9207                 self._registered[fd] = eventmask
9208                 self._select_args = None
9209
9210         def unregister(self, fd):
9211                 self._select_args = None
9212                 del self._registered[fd]
9213
9214         def poll(self, *args):
9215                 if len(args) > 1:
9216                         raise TypeError(
9217                                 "poll expected at most 2 arguments, got " + \
9218                                 repr(1 + len(args)))
9219
9220                 timeout = None
9221                 if args:
9222                         timeout = args[0]
9223
9224                 select_args = self._select_args
9225                 if select_args is None:
9226                         select_args = [self._registered.keys(), [], []]
9227
9228                 if timeout is not None:
9229                         select_args = select_args[:]
9230                         # Translate poll() timeout args to select() timeout args:
9231                         #
9232                         #          | units        | value(s) for indefinite block
9233                         # ---------|--------------|------------------------------
9234                         #   poll   | milliseconds | omitted, negative, or None
9235                         # ---------|--------------|------------------------------
9236                         #   select | seconds      | omitted
9237                         # ---------|--------------|------------------------------
9238
9239                         if timeout is not None and timeout < 0:
9240                                 timeout = None
9241                         if timeout is not None:
9242                                 select_args.append(timeout / 1000.0)  # milliseconds -> seconds
9243
9244                 select_events = select.select(*select_args)
9245                 poll_events = []
9246                 for fd in select_events[0]:
9247                         poll_events.append((fd, PollConstants.POLLIN))
9248                 return poll_events
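# Minimal usage sketch for PollSelectAdapter (master_fd and consume_output
# are hypothetical); note that poll() timeouts are given in milliseconds and
# translated to seconds for the underlying select() call:
#
#   poller = PollSelectAdapter()
#   poller.register(master_fd, PollConstants.POLLIN)
#   for fd, event in poller.poll(2500):  # block for up to 2.5 seconds
#       if event & PollConstants.POLLIN:
#           consume_output(fd)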
9249
9250 class SequentialTaskQueue(SlotObject):
9251
9252         __slots__ = ("max_jobs", "running_tasks") + \
9253                 ("_dirty", "_scheduling", "_task_queue")
9254
9255         def __init__(self, **kwargs):
9256                 SlotObject.__init__(self, **kwargs)
9257                 self._task_queue = deque()
9258                 self.running_tasks = set()
9259                 if self.max_jobs is None:
9260                         self.max_jobs = 1
9261                 self._dirty = True
9262
9263         def add(self, task):
9264                 self._task_queue.append(task)
9265                 self._dirty = True
9266
9267         def addFront(self, task):
9268                 self._task_queue.appendleft(task)
9269                 self._dirty = True
9270
9271         def schedule(self):
9272
9273                 if not self._dirty:
9274                         return False
9275
9276                 if not self:
9277                         return False
9278
9279                 if self._scheduling:
9280                         # Ignore any recursive schedule() calls triggered via
9281                         # self._task_exit().
9282                         return False
9283
9284                 self._scheduling = True
9285
9286                 task_queue = self._task_queue
9287                 running_tasks = self.running_tasks
9288                 max_jobs = self.max_jobs
9289                 state_changed = False
9290
9291                 while task_queue and \
9292                         (max_jobs is True or len(running_tasks) < max_jobs):
9293                         task = task_queue.popleft()
9294                         cancelled = getattr(task, "cancelled", None)
9295                         if not cancelled:
9296                                 running_tasks.add(task)
9297                                 task.addExitListener(self._task_exit)
9298                                 task.start()
9299                         state_changed = True
9300
9301                 self._dirty = False
9302                 self._scheduling = False
9303
9304                 return state_changed
9305
9306         def _task_exit(self, task):
9307                 """
9308                 Since we can always rely on exit listeners being called, the set of
9309                 running tasks is always pruned automatically and there is never any need
9310                 to actively prune it.
9311                 """
9312                 self.running_tasks.remove(task)
9313                 if self._task_queue:
9314                         self._dirty = True
9315
9316         def clear(self):
9317                 self._task_queue.clear()
9318                 running_tasks = self.running_tasks
9319                 while running_tasks:
9320                         task = running_tasks.pop()
9321                         task.removeExitListener(self._task_exit)
9322                         task.cancel()
9323                 self._dirty = False
9324
9325         def __nonzero__(self):
9326                 return bool(self._task_queue or self.running_tasks)
9327
9328         def __len__(self):
9329                 return len(self._task_queue) + len(self.running_tasks)
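# Minimal usage sketch for SequentialTaskQueue (the task objects are
# hypothetical; anything implementing start(), cancel(), addExitListener()
# and removeExitListener() fits):
#
#   queue = SequentialTaskQueue(max_jobs=2)
#   queue.add(build_task)
#   queue.add(install_task)
#   queue.schedule()  # starts up to max_jobs tasks; finished tasks are
#                     # pruned from running_tasks by their exit listeners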
9330
9331 _can_poll_device = None
9332
9333 def can_poll_device():
9334         """
9335         Test if it's possible to use poll() on a device such as a pty. This
9336         is known to fail on Darwin.
9337         @rtype: bool
9338         @returns: True if poll() on a device succeeds, False otherwise.
9339         """
9340
9341         global _can_poll_device
9342         if _can_poll_device is not None:
9343                 return _can_poll_device
9344
9345         if not hasattr(select, "poll"):
9346                 _can_poll_device = False
9347                 return _can_poll_device
9348
9349         try:
9350                 dev_null = open('/dev/null', 'rb')
9351         except IOError:
9352                 _can_poll_device = False
9353                 return _can_poll_device
9354
9355         p = select.poll()
9356         p.register(dev_null.fileno(), PollConstants.POLLIN)
9357
9358         invalid_request = False
9359         for f, event in p.poll():
9360                 if event & PollConstants.POLLNVAL:
9361                         invalid_request = True
9362                         break
9363         dev_null.close()
9364
9365         _can_poll_device = not invalid_request
9366         return _can_poll_device
9367
9368 def create_poll_instance():
9369         """
9370         Create an instance of select.poll, or an instance of
9371         PollSelectAdapter if there is no poll() implementation or
9372         it is broken somehow.
9373         """
9374         if can_poll_device():
9375                 return select.poll()
9376         return PollSelectAdapter()
9377
9378 getloadavg = getattr(os, "getloadavg", None)
9379 if getloadavg is None:
9380         def getloadavg():
9381                 """
9382                 Uses /proc/loadavg to emulate os.getloadavg().
9383                 Raises OSError if the load average was unobtainable.
9384                 """
9385                 try:
9386                         loadavg_str = open('/proc/loadavg').readline()
9387                 except IOError:
9388                         # getloadavg() is only supposed to raise OSError, so convert
9389                         raise OSError('unknown')
9390                 loadavg_split = loadavg_str.split()
9391                 if len(loadavg_split) < 3:
9392                         raise OSError('unknown')
9393                 loadavg_floats = []
9394                 for i in xrange(3):
9395                         try:
9396                                 loadavg_floats.append(float(loadavg_split[i]))
9397                         except ValueError:
9398                                 raise OSError('unknown')
9399                 return tuple(loadavg_floats)
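                # For illustration: a /proc/loadavg line such as
                # "0.38 0.41 0.40 1/245 12345" yields the tuple
                # (0.38, 0.41, 0.40), matching os.getloadavg().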
9400
9401 class PollScheduler(object):
9402
9403         class _sched_iface_class(SlotObject):
9404                 __slots__ = ("register", "schedule", "unregister")
9405
9406         def __init__(self):
9407                 self._max_jobs = 1
9408                 self._max_load = None
9409                 self._jobs = 0
9410                 self._poll_event_queue = []
9411                 self._poll_event_handlers = {}
9412                 self._poll_event_handler_ids = {}
9413                 # Increment id for each new handler.
9414                 self._event_handler_id = 0
9415                 self._poll_obj = create_poll_instance()
9416                 self._scheduling = False
9417
9418         def _schedule(self):
9419                 """
9420                 Calls _schedule_tasks() and automatically returns early from
9421                 any recursive calls to this method that the _schedule_tasks()
9422                 call might trigger. This makes _schedule() safe to call from
9423                 inside exit listeners.
9424                 """
9425                 if self._scheduling:
9426                         return False
9427                 self._scheduling = True
9428                 try:
9429                         return self._schedule_tasks()
9430                 finally:
9431                         self._scheduling = False
9432
9433         def _running_job_count(self):
9434                 return self._jobs
9435
9436         def _can_add_job(self):
9437                 max_jobs = self._max_jobs
9438                 max_load = self._max_load
9439
9440                 if self._max_jobs is not True and \
9441                         self._running_job_count() >= self._max_jobs:
9442                         return False
9443
9444                 if max_load is not None and \
9445                         (max_jobs is True or max_jobs > 1) and \
9446                         self._running_job_count() >= 1:
9447                         try:
9448                                 avg1, avg5, avg15 = getloadavg()
9449                         except OSError:
9450                                 return False
9451
9452                         if avg1 >= max_load:
9453                                 return False
9454
9455                 return True
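        # For example, with --jobs=4 --load-average=3.0 and at least one job
        # already running, a 1-minute load average of 3.2 keeps _can_add_job()
        # returning False until the load drops below the limit.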
9456
9457         def _poll(self, timeout=None):
9458                 """
9459                 All poll() calls pass through here. The poll events
9460                 are added directly to self._poll_event_queue.
9461                 In order to avoid endless blocking, this raises
9462                 StopIteration if timeout is None and there are
9463                 no file descriptors to poll.
9464                 """
9465                 if not self._poll_event_handlers:
9466                         self._schedule()
9467                         if timeout is None and \
9468                                 not self._poll_event_handlers:
9469                                 raise StopIteration(
9470                                         "timeout is None and there are no poll() event handlers")
9471
9472                 # The following error is known to occur with Linux kernel versions
9473                 # less than 2.6.24:
9474                 #
9475                 #   select.error: (4, 'Interrupted system call')
9476                 #
9477                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9478                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9479                 # without any events.
9480                 while True:
9481                         try:
9482                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9483                                 break
9484                         except select.error, e:
9485                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9486                                         level=logging.ERROR, noiselevel=-1)
9487                                 del e
9488                                 if timeout is not None:
9489                                         break
9490
9491         def _next_poll_event(self, timeout=None):
9492                 """
9493                 Since the _schedule_wait() loop is called by event
9494                 handlers from _poll_loop(), maintain a central event
9495                 queue for both of them to share events from a single
9496                 poll() call. In order to avoid endless blocking, this
9497                 raises StopIteration if timeout is None and there are
9498                 no file descriptors to poll.
9499                 """
9500                 if not self._poll_event_queue:
9501                         self._poll(timeout)
9502                 return self._poll_event_queue.pop()
9503
9504         def _poll_loop(self):
9505
9506                 event_handlers = self._poll_event_handlers
9507                 event_handled = False
9508
9509                 try:
9510                         while event_handlers:
9511                                 f, event = self._next_poll_event()
9512                                 handler, reg_id = event_handlers[f]
9513                                 handler(f, event)
9514                                 event_handled = True
9515                 except StopIteration:
9516                         event_handled = True
9517
9518                 if not event_handled:
9519                         raise AssertionError("tight loop")
9520
9521         def _schedule_yield(self):
9522                 """
9523                 Schedule for a short period of time chosen by the scheduler based
9524                 on internal state. Synchronous tasks should call this periodically
9525                 in order to allow the scheduler to service pending poll events. The
9526                 scheduler will call poll() exactly once, without blocking, and any
9527                 resulting poll events will be serviced.
9528                 """
9529                 event_handlers = self._poll_event_handlers
9530                 events_handled = 0
9531
9532                 if not event_handlers:
9533                         return bool(events_handled)
9534
9535                 if not self._poll_event_queue:
9536                         self._poll(0)
9537
9538                 try:
9539                         while event_handlers and self._poll_event_queue:
9540                                 f, event = self._next_poll_event()
9541                                 handler, reg_id = event_handlers[f]
9542                                 handler(f, event)
9543                                 events_handled += 1
9544                 except StopIteration:
9545                         events_handled += 1
9546
9547                 return bool(events_handled)
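        # Intended calling pattern (the surrounding loop is hypothetical):
        #
        #   while synchronous_work_remains():
        #       do_some_work()
        #       self._schedule_yield()  # one non-blocking poll(0), then
        #                               # service any pending events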
9548
9549         def _register(self, f, eventmask, handler):
9550                 """
9551                 @rtype: int
9552                 @return: A unique registration id, for use in schedule() or
9553                         unregister() calls.
9554                 """
9555                 if f in self._poll_event_handlers:
9556                         raise AssertionError("fd %d is already registered" % f)
9557                 self._event_handler_id += 1
9558                 reg_id = self._event_handler_id
9559                 self._poll_event_handler_ids[reg_id] = f
9560                 self._poll_event_handlers[f] = (handler, reg_id)
9561                 self._poll_obj.register(f, eventmask)
9562                 return reg_id
9563
9564         def _unregister(self, reg_id):
9565                 f = self._poll_event_handler_ids[reg_id]
9566                 self._poll_obj.unregister(f)
9567                 del self._poll_event_handlers[f]
9568                 del self._poll_event_handler_ids[reg_id]
9569
9570         def _schedule_wait(self, wait_ids):
9571                 """
9572                 Schedule until the given wait_ids are no longer registered
9573                 for poll() events.
9574                 @type wait_ids: int or set of ints
9575                 @param wait_ids: one or more poll() registration ids to wait for
9576                 """
9577                 event_handlers = self._poll_event_handlers
9578                 handler_ids = self._poll_event_handler_ids
9579                 event_handled = False
9580
9581                 if isinstance(wait_ids, int):
9582                         wait_ids = frozenset([wait_ids])
9583
9584                 try:
9585                         while wait_ids.intersection(handler_ids):
9586                                 f, event = self._next_poll_event()
9587                                 handler, reg_id = event_handlers[f]
9588                                 handler(f, event)
9589                                 event_handled = True
9590                 except StopIteration:
9591                         event_handled = True
9592
9593                 return event_handled
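# Typical flow through the poll-scheduler interface (master_fd and
# output_handler are hypothetical): a task registers a file descriptor and
# then waits on the returned registration id; _schedule_wait() returns once
# the handler has unregistered that id (for example on EOF):
#
#   reg_id = self._register(master_fd, PollConstants.POLLIN, output_handler)
#   self._schedule_wait(reg_id)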
9594
9595 class QueueScheduler(PollScheduler):
9596
9597         """
9598         Add instances of SequentialTaskQueue and then call run(). The
9599         run() method returns when no tasks remain.
9600         """
9601
9602         def __init__(self, max_jobs=None, max_load=None):
9603                 PollScheduler.__init__(self)
9604
9605                 if max_jobs is None:
9606                         max_jobs = 1
9607
9608                 self._max_jobs = max_jobs
9609                 self._max_load = max_load
9610                 self.sched_iface = self._sched_iface_class(
9611                         register=self._register,
9612                         schedule=self._schedule_wait,
9613                         unregister=self._unregister)
9614
9615                 self._queues = []
9616                 self._schedule_listeners = []
9617
9618         def add(self, q):
9619                 self._queues.append(q)
9620
9621         def remove(self, q):
9622                 self._queues.remove(q)
9623
9624         def run(self):
9625
9626                 while self._schedule():
9627                         self._poll_loop()
9628
9629                 while self._running_job_count():
9630                         self._poll_loop()
9631
9632         def _schedule_tasks(self):
9633                 """
9634                 @rtype: bool
9635                 @returns: True if there may be remaining tasks to schedule,
9636                         False otherwise.
9637                 """
9638                 while self._can_add_job():
9639                         n = self._max_jobs - self._running_job_count()
9640                         if n < 1:
9641                                 break
9642
9643                         if not self._start_next_job(n):
9644                                 return False
9645
9646                 for q in self._queues:
9647                         if q:
9648                                 return True
9649                 return False
9650
9651         def _running_job_count(self):
9652                 job_count = 0
9653                 for q in self._queues:
9654                         job_count += len(q.running_tasks)
9655                 self._jobs = job_count
9656                 return job_count
9657
9658         def _start_next_job(self, n=1):
9659                 started_count = 0
9660                 for q in self._queues:
9661                         initial_job_count = len(q.running_tasks)
9662                         q.schedule()
9663                         final_job_count = len(q.running_tasks)
9664                         if final_job_count > initial_job_count:
9665                                 started_count += (final_job_count - initial_job_count)
9666                         if started_count >= n:
9667                                 break
9668                 return started_count
9669
9670 class TaskScheduler(object):
9671
9672         """
9673         A simple way to handle scheduling of AsynchronousTask instances. Simply
9674         add tasks and call run(). The run() method returns when no tasks remain.
9675         """
9676
9677         def __init__(self, max_jobs=None, max_load=None):
9678                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9679                 self._scheduler = QueueScheduler(
9680                         max_jobs=max_jobs, max_load=max_load)
9681                 self.sched_iface = self._scheduler.sched_iface
9682                 self.run = self._scheduler.run
9683                 self._scheduler.add(self._queue)
9684
9685         def add(self, task):
9686                 self._queue.add(task)
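# Minimal usage sketch for TaskScheduler (build_task is a hypothetical
# AsynchronousTask-style object):
#
#   scheduler = TaskScheduler(max_jobs=2, max_load=3.0)
#   scheduler.add(build_task)
#   scheduler.run()  # returns once every queued task has exited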
9687
9688 class JobStatusDisplay(object):
9689
9690         _bound_properties = ("curval", "failed", "running")
9691         _jobs_column_width = 48
9692
9693         # Don't update the display unless at least this much
9694         # time has passed, in units of seconds.
9695         _min_display_latency = 2
9696
9697         _default_term_codes = {
9698                 'cr'  : '\r',
9699                 'el'  : '\x1b[K',
9700                 'nel' : '\n',
9701         }
9702
9703         _termcap_name_map = {
9704                 'carriage_return' : 'cr',
9705                 'clr_eol'         : 'el',
9706                 'newline'         : 'nel',
9707         }
9708
9709         def __init__(self, out=sys.stdout, quiet=False):
9710                 object.__setattr__(self, "out", out)
9711                 object.__setattr__(self, "quiet", quiet)
9712                 object.__setattr__(self, "maxval", 0)
9713                 object.__setattr__(self, "merges", 0)
9714                 object.__setattr__(self, "_changed", False)
9715                 object.__setattr__(self, "_displayed", False)
9716                 object.__setattr__(self, "_last_display_time", 0)
9717                 object.__setattr__(self, "width", 80)
9718                 self.reset()
9719
9720                 isatty = hasattr(out, "isatty") and out.isatty()
9721                 object.__setattr__(self, "_isatty", isatty)
9722                 if not isatty or not self._init_term():
9723                         term_codes = {}
9724                         for k, capname in self._termcap_name_map.iteritems():
9725                                 term_codes[k] = self._default_term_codes[capname]
9726                         object.__setattr__(self, "_term_codes", term_codes)
9727                 encoding = sys.getdefaultencoding()
9728                 for k, v in self._term_codes.items():
9729                         if not isinstance(v, basestring):
9730                                 self._term_codes[k] = v.decode(encoding, 'replace')
9731
9732         def _init_term(self):
9733                 """
9734                 Initialize term control codes.
9735                 @rtype: bool
9736                 @returns: True if term codes were successfully initialized,
9737                         False otherwise.
9738                 """
9739
9740                 term_type = os.environ.get("TERM", "vt100")
9741                 tigetstr = None
9742
9743                 try:
9744                         import curses
9745                         try:
9746                                 curses.setupterm(term_type, self.out.fileno())
9747                                 tigetstr = curses.tigetstr
9748                         except curses.error:
9749                                 pass
9750                 except ImportError:
9751                         pass
9752
9753                 if tigetstr is None:
9754                         return False
9755
9756                 term_codes = {}
9757                 for k, capname in self._termcap_name_map.iteritems():
9758                         code = tigetstr(capname)
9759                         if code is None:
9760                                 code = self._default_term_codes[capname]
9761                         term_codes[k] = code
9762                 object.__setattr__(self, "_term_codes", term_codes)
9763                 return True
9764
9765         def _format_msg(self, msg):
9766                 return ">>> %s" % msg
9767
9768         def _erase(self):
9769                 self.out.write(
9770                         self._term_codes['carriage_return'] + \
9771                         self._term_codes['clr_eol'])
9772                 self.out.flush()
9773                 self._displayed = False
9774
9775         def _display(self, line):
9776                 self.out.write(line)
9777                 self.out.flush()
9778                 self._displayed = True
9779
9780         def _update(self, msg):
9781
9782                 out = self.out
9783                 if not self._isatty:
9784                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9785                         self.out.flush()
9786                         self._displayed = True
9787                         return
9788
9789                 if self._displayed:
9790                         self._erase()
9791
9792                 self._display(self._format_msg(msg))
9793
9794         def displayMessage(self, msg):
9795
9796                 was_displayed = self._displayed
9797
9798                 if self._isatty and self._displayed:
9799                         self._erase()
9800
9801                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9802                 self.out.flush()
9803                 self._displayed = False
9804
9805                 if was_displayed:
9806                         self._changed = True
9807                         self.display()
9808
9809         def reset(self):
9810                 self.maxval = 0
9811                 self.merges = 0
9812                 for name in self._bound_properties:
9813                         object.__setattr__(self, name, 0)
9814
9815                 if self._displayed:
9816                         self.out.write(self._term_codes['newline'])
9817                         self.out.flush()
9818                         self._displayed = False
9819
9820         def __setattr__(self, name, value):
9821                 old_value = getattr(self, name)
9822                 if value == old_value:
9823                         return
9824                 object.__setattr__(self, name, value)
9825                 if name in self._bound_properties:
9826                         self._property_change(name, old_value, value)
9827
9828         def _property_change(self, name, old_value, new_value):
9829                 self._changed = True
9830                 self.display()
9831
9832         def _load_avg_str(self):
9833                 try:
9834                         avg = getloadavg()
9835                 except OSError:
9836                         return 'unknown'
9837
9838                 max_avg = max(avg)
9839
9840                 if max_avg < 10:
9841                         digits = 2
9842                 elif max_avg < 100:
9843                         digits = 1
9844                 else:
9845                         digits = 0
9846
9847                 return ", ".join(("%%.%df" % digits) % x for x in avg)
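        # For illustration: (0.42, 0.37, 0.30) is rendered with two decimal
        # places as "0.42, 0.37, 0.30", while (123.4, 80.1, 60.0) drops to
        # zero decimal places and becomes "123, 80, 60".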
9848
9849         def display(self):
9850                 """
9851                 Display status on stdout, but only if something has
9852                 changed since the last call.
9853                 """
9854
9855                 if self.quiet:
9856                         return
9857
9858                 current_time = time.time()
9859                 time_delta = current_time - self._last_display_time
9860                 if self._displayed and \
9861                         not self._changed:
9862                         if not self._isatty:
9863                                 return
9864                         if time_delta < self._min_display_latency:
9865                                 return
9866
9867                 self._last_display_time = current_time
9868                 self._changed = False
9869                 self._display_status()
9870
9871         def _display_status(self):
9872                 # Don't use len(self._completed_tasks) here since that also
9873                 # can include uninstall tasks.
9874                 curval_str = str(self.curval)
9875                 maxval_str = str(self.maxval)
9876                 running_str = str(self.running)
9877                 failed_str = str(self.failed)
9878                 load_avg_str = self._load_avg_str()
9879
9880                 color_output = StringIO()
9881                 plain_output = StringIO()
9882                 style_file = portage.output.ConsoleStyleFile(color_output)
9883                 style_file.write_listener = plain_output
9884                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9885                 style_writer.style_listener = style_file.new_styles
9886                 f = formatter.AbstractFormatter(style_writer)
9887
9888                 number_style = "INFORM"
9889                 f.add_literal_data("Jobs: ")
9890                 f.push_style(number_style)
9891                 f.add_literal_data(curval_str)
9892                 f.pop_style()
9893                 f.add_literal_data(" of ")
9894                 f.push_style(number_style)
9895                 f.add_literal_data(maxval_str)
9896                 f.pop_style()
9897                 f.add_literal_data(" complete")
9898
9899                 if self.running:
9900                         f.add_literal_data(", ")
9901                         f.push_style(number_style)
9902                         f.add_literal_data(running_str)
9903                         f.pop_style()
9904                         f.add_literal_data(" running")
9905
9906                 if self.failed:
9907                         f.add_literal_data(", ")
9908                         f.push_style(number_style)
9909                         f.add_literal_data(failed_str)
9910                         f.pop_style()
9911                         f.add_literal_data(" failed")
9912
9913                 padding = self._jobs_column_width - len(plain_output.getvalue())
9914                 if padding > 0:
9915                         f.add_literal_data(padding * " ")
9916
9917                 f.add_literal_data("Load avg: ")
9918                 f.add_literal_data(load_avg_str)
9919
9920                 # Truncate to fit width, to avoid making the terminal scroll if the
9921                 # line overflows (happens when the load average is large).
9922                 plain_output = plain_output.getvalue()
9923                 if self._isatty and len(plain_output) > self.width:
9924                         # Use plain_output here since it's easier to truncate
9925                         # properly than the color output which contains console
9926                         # color codes.
9927                         self._update(plain_output[:self.width])
9928                 else:
9929                         self._update(color_output.getvalue())
9930
9931                 xtermTitle(" ".join(plain_output.split()))
9932
9933 class Scheduler(PollScheduler):
9934
9935         _opts_ignore_blockers = \
9936                 frozenset(["--buildpkgonly",
9937                 "--fetchonly", "--fetch-all-uri",
9938                 "--nodeps", "--pretend"])
9939
9940         _opts_no_background = \
9941                 frozenset(["--pretend",
9942                 "--fetchonly", "--fetch-all-uri"])
9943
9944         _opts_no_restart = frozenset(["--buildpkgonly",
9945                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9946
9947         _bad_resume_opts = set(["--ask", "--changelog",
9948                 "--resume", "--skipfirst"])
9949
9950         _fetch_log = "/var/log/emerge-fetch.log"
9951
9952         class _iface_class(SlotObject):
9953                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9954                         "dblinkElog", "fetch", "register", "schedule",
9955                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9956                         "unregister")
9957
9958         class _fetch_iface_class(SlotObject):
9959                 __slots__ = ("log_file", "schedule")
9960
9961         _task_queues_class = slot_dict_class(
9962                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9963
9964         class _build_opts_class(SlotObject):
9965                 __slots__ = ("buildpkg", "buildpkgonly",
9966                         "fetch_all_uri", "fetchonly", "pretend")
9967
9968         class _binpkg_opts_class(SlotObject):
9969                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9970
9971         class _pkg_count_class(SlotObject):
9972                 __slots__ = ("curval", "maxval")
9973
9974         class _emerge_log_class(SlotObject):
9975                 __slots__ = ("xterm_titles",)
9976
9977                 def log(self, *pargs, **kwargs):
9978                         if not self.xterm_titles:
9979                                 # Avoid interference with the scheduler's status display.
9980                                 kwargs.pop("short_msg", None)
9981                         emergelog(self.xterm_titles, *pargs, **kwargs)
9982
9983         class _failed_pkg(SlotObject):
9984                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9985
9986         class _ConfigPool(object):
9987                 """Interface for a task to temporarily allocate a config
9988                 instance from a pool. This allows a task to be constructed
9989                 long before the config instance actually becomes needed, like
9990                 when prefetchers are constructed for the whole merge list."""
9991                 __slots__ = ("_root", "_allocate", "_deallocate")
9992                 def __init__(self, root, allocate, deallocate):
9993                         self._root = root
9994                         self._allocate = allocate
9995                         self._deallocate = deallocate
9996                 def allocate(self):
9997                         return self._allocate(self._root)
9998                 def deallocate(self, settings):
9999                         self._deallocate(settings)
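        # Usage sketch for _ConfigPool (the allocate/deallocate callables are
        # bound methods of the Scheduler; the names below are illustrative):
        #
        #   pool = self._ConfigPool(pkg.root, allocate_config, deallocate_config)
        #   settings = pool.allocate()
        #   try:
        #       ...run an ebuild phase with this config instance...
        #   finally:
        #       pool.deallocate(settings)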
10000
10001         class _unknown_internal_error(portage.exception.PortageException):
10002                 """
10003                 Used internally to terminate scheduling. The specific reason for
10004                 the failure should have been dumped to stderr.
10005                 """
10006                 def __init__(self, value=""):
10007                         portage.exception.PortageException.__init__(self, value)
10008
10009         def __init__(self, settings, trees, mtimedb, myopts,
10010                 spinner, mergelist, favorites, digraph):
10011                 PollScheduler.__init__(self)
10012                 self.settings = settings
10013                 self.target_root = settings["ROOT"]
10014                 self.trees = trees
10015                 self.myopts = myopts
10016                 self._spinner = spinner
10017                 self._mtimedb = mtimedb
10018                 self._mergelist = mergelist
10019                 self._favorites = favorites
10020                 self._args_set = InternalPackageSet(favorites)
10021                 self._build_opts = self._build_opts_class()
10022                 for k in self._build_opts.__slots__:
10023                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10024                 self._binpkg_opts = self._binpkg_opts_class()
10025                 for k in self._binpkg_opts.__slots__:
10026                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10027
10028                 self.curval = 0
10029                 self._logger = self._emerge_log_class()
10030                 self._task_queues = self._task_queues_class()
10031                 for k in self._task_queues.allowed_keys:
10032                         setattr(self._task_queues, k,
10033                                 SequentialTaskQueue())
10034
10035                 # Holds merges that will wait to be executed when no builds are
10036                 # executing. This is useful for system packages since dependencies
10037                 # on system packages are frequently unspecified.
10038                 self._merge_wait_queue = []
10039                 # Holds merges that have been transferred from the merge_wait_queue to
10040                 # the actual merge queue. They are removed from this list upon
10041                 # completion. Other packages can start building only when this list is
10042                 # empty.
10043                 self._merge_wait_scheduled = []
10044
10045                 # Holds system packages and their deep runtime dependencies. Before
10046                 # being merged, these packages go to merge_wait_queue, to be merged
10047                 # when no other packages are building.
10048                 self._deep_system_deps = set()
10049
10050                 # Holds packages to merge which will satisfy currently unsatisfied
10051                 # deep runtime dependencies of system packages. If this is not empty
10052                 # then no parallel builds will be spawned until it is empty. This
10053                 # minimizes the possibility that a build will fail due to the system
10054                 # being in a fragile state. For example, see bug #259954.
10055                 self._unsatisfied_system_deps = set()
10056
10057                 self._status_display = JobStatusDisplay()
10058                 self._max_load = myopts.get("--load-average")
10059                 max_jobs = myopts.get("--jobs")
10060                 if max_jobs is None:
10061                         max_jobs = 1
10062                 self._set_max_jobs(max_jobs)
10063
10064                 # The root where the currently running
10065                 # portage instance is installed.
10066                 self._running_root = trees["/"]["root_config"]
10067                 self.edebug = 0
10068                 if settings.get("PORTAGE_DEBUG", "") == "1":
10069                         self.edebug = 1
10070                 self.pkgsettings = {}
10071                 self._config_pool = {}
10072                 self._blocker_db = {}
10073                 for root in trees:
10074                         self._config_pool[root] = []
10075                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10076
10077                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10078                         schedule=self._schedule_fetch)
10079                 self._sched_iface = self._iface_class(
10080                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10081                         dblinkDisplayMerge=self._dblink_display_merge,
10082                         dblinkElog=self._dblink_elog,
10083                         fetch=fetch_iface, register=self._register,
10084                         schedule=self._schedule_wait,
10085                         scheduleSetup=self._schedule_setup,
10086                         scheduleUnpack=self._schedule_unpack,
10087                         scheduleYield=self._schedule_yield,
10088                         unregister=self._unregister)
10089
10090                 self._prefetchers = weakref.WeakValueDictionary()
10091                 self._pkg_queue = []
10092                 self._completed_tasks = set()
10093
10094                 self._failed_pkgs = []
10095                 self._failed_pkgs_all = []
10096                 self._failed_pkgs_die_msgs = []
10097                 self._post_mod_echo_msgs = []
10098                 self._parallel_fetch = False
10099                 merge_count = len([x for x in mergelist \
10100                         if isinstance(x, Package) and x.operation == "merge"])
10101                 self._pkg_count = self._pkg_count_class(
10102                         curval=0, maxval=merge_count)
10103                 self._status_display.maxval = self._pkg_count.maxval
10104
10105                 # The load average takes some time to respond when new
10106                 # jobs are added, so we need to limit the rate of adding
10107                 # new jobs.
10108                 self._job_delay_max = 10
10109                 self._job_delay_factor = 1.0
10110                 self._job_delay_exp = 1.5
10111                 self._previous_job_start_time = None
10112
10113                 self._set_digraph(digraph)
10114
10115                 # This is used to memoize the _choose_pkg() result when
10116                 # no packages can be chosen until one of the existing
10117                 # jobs completes.
10118                 self._choose_pkg_return_early = False
10119
10120                 features = self.settings.features
10121                 if "parallel-fetch" in features and \
10122                         not ("--pretend" in self.myopts or \
10123                         "--fetch-all-uri" in self.myopts or \
10124                         "--fetchonly" in self.myopts):
10125                         if "distlocks" not in features:
10126                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10127                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10128                                         "requires the distlocks feature enabled"+"\n",
10129                                         noiselevel=-1)
10130                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10131                                         "thus parallel-fetching is being disabled"+"\n",
10132                                         noiselevel=-1)
10133                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10134                         elif len(mergelist) > 1:
10135                                 self._parallel_fetch = True
10136
10137                 if self._parallel_fetch:
10138                         # clear out existing fetch log if it exists
10139                         try:
10140                                 open(self._fetch_log, 'w').close()
10141                         except EnvironmentError:
10142                                 pass
10143
10144                 self._running_portage = None
10145                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10146                         portage.const.PORTAGE_PACKAGE_ATOM)
10147                 if portage_match:
10148                         cpv = portage_match.pop()
10149                         self._running_portage = self._pkg(cpv, "installed",
10150                                 self._running_root, installed=True)
10151
10152         def _poll(self, timeout=None):
10153                 self._schedule()
10154                 PollScheduler._poll(self, timeout=timeout)
10155
10156         def _set_max_jobs(self, max_jobs):
10157                 self._max_jobs = max_jobs
10158                 self._task_queues.jobs.max_jobs = max_jobs
10159
10160         def _background_mode(self):
10161                 """
10162                 Check if background mode is enabled and adjust states as necessary.
10163
10164                 @rtype: bool
10165                 @returns: True if background mode is enabled, False otherwise.
10166                 """
10167                 background = (self._max_jobs is True or \
10168                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10169                         not bool(self._opts_no_background.intersection(self.myopts))
10170
10171                 if background:
10172                         interactive_tasks = self._get_interactive_tasks()
10173                         if interactive_tasks:
10174                                 background = False
10175                                 writemsg_level(">>> Sending package output to stdio due " + \
10176                                         "to interactive package(s):\n",
10177                                         level=logging.INFO, noiselevel=-1)
10178                                 msg = [""]
10179                                 for pkg in interactive_tasks:
10180                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10181                                         if pkg.root != "/":
10182                                                 pkg_str += " for " + pkg.root
10183                                         msg.append(pkg_str)
10184                                 msg.append("")
10185                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10186                                         level=logging.INFO, noiselevel=-1)
10187                                 if self._max_jobs is True or self._max_jobs > 1:
10188                                         self._set_max_jobs(1)
10189                                         writemsg_level(">>> Setting --jobs=1 due " + \
10190                                                 "to the above interactive package(s)\n",
10191                                                 level=logging.INFO, noiselevel=-1)
10192
10193                 self._status_display.quiet = \
10194                         not background or \
10195                         ("--quiet" in self.myopts and \
10196                         "--verbose" not in self.myopts)
10197
10198                 self._logger.xterm_titles = \
10199                         "notitles" not in self.settings.features and \
10200                         self._status_display.quiet
10201
10202                 return background
10203
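        # Worked example (illustrative, with assumed options): running
        # "emerge --jobs=3 --quiet foo bar" with none of the
        # _opts_no_background options given yields background == True, so
        # package output goes to the log rather than the terminal. If one of
        # the listed packages declares PROPERTIES="interactive", the code
        # above flips background back to False and clamps --jobs to 1.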
10204         def _get_interactive_tasks(self):
10205                 from portage import flatten
10206                 from portage.dep import use_reduce, paren_reduce
10207                 interactive_tasks = []
10208                 for task in self._mergelist:
10209                         if not (isinstance(task, Package) and \
10210                                 task.operation == "merge"):
10211                                 continue
10212                         try:
10213                                 properties = flatten(use_reduce(paren_reduce(
10214                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10215                         except portage.exception.InvalidDependString, e:
10216                                 show_invalid_depstring_notice(task,
10217                                         task.metadata["PROPERTIES"], str(e))
10218                                 raise self._unknown_internal_error()
10219                         if "interactive" in properties:
10220                                 interactive_tasks.append(task)
10221                 return interactive_tasks
10222
10223         def _set_digraph(self, digraph):
10224                 if "--nodeps" in self.myopts or \
10225                         (self._max_jobs is not True and self._max_jobs < 2):
10226                         # save some memory
10227                         self._digraph = None
10228                         return
10229
10230                 self._digraph = digraph
10231                 self._find_system_deps()
10232                 self._prune_digraph()
10233                 self._prevent_builddir_collisions()
10234
10235         def _find_system_deps(self):
10236                 """
10237                 Find system packages and their deep runtime dependencies. Before being
10238                 merged, these packages go to merge_wait_queue, to be merged when no
10239                 other packages are building.
10240                 """
10241                 deep_system_deps = self._deep_system_deps
10242                 deep_system_deps.clear()
10243                 deep_system_deps.update(
10244                         _find_deep_system_runtime_deps(self._digraph))
10245                 deep_system_deps.difference_update([pkg for pkg in \
10246                         deep_system_deps if pkg.operation != "merge"])
10247
10248         def _prune_digraph(self):
10249                 """
10250                 Prune any root nodes that are irrelevant.
10251                 """
10252
10253                 graph = self._digraph
10254                 completed_tasks = self._completed_tasks
10255                 removed_nodes = set()
10256                 while True:
10257                         for node in graph.root_nodes():
10258                                 if not isinstance(node, Package) or \
10259                                         (node.installed and node.operation == "nomerge") or \
10260                                         node.onlydeps or \
10261                                         node in completed_tasks:
10262                                         removed_nodes.add(node)
10263                         if removed_nodes:
10264                                 graph.difference_update(removed_nodes)
10265                         if not removed_nodes:
10266                                 break
10267                         removed_nodes.clear()
10268
10269         def _prevent_builddir_collisions(self):
10270                 """
10271                 When building stages, sometimes the same exact cpv needs to be merged
10272                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10273                 in the builddir. Currently, normal file locks would be inappropriate
10274                 for this purpose since emerge holds all of its build dir locks from
10275                 the main process.
10276                 """
10277                 cpv_map = {}
10278                 for pkg in self._mergelist:
10279                         if not isinstance(pkg, Package):
10280                                 # a satisfied blocker
10281                                 continue
10282                         if pkg.installed:
10283                                 continue
10284                         if pkg.cpv not in cpv_map:
10285                                 cpv_map[pkg.cpv] = [pkg]
10286                                 continue
10287                         for earlier_pkg in cpv_map[pkg.cpv]:
10288                                 self._digraph.add(earlier_pkg, pkg,
10289                                         priority=DepPriority(buildtime=True))
10290                         cpv_map[pkg.cpv].append(pkg)
10291
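        # Illustrative sketch (not part of the original code): given two
        # hypothetical Package instances pkg_a and pkg_b with the same cpv
        # but different roots, the loop above ends up with
        #
        #   cpv_map == {pkg_a.cpv: [pkg_a, pkg_b]}
        #
        # plus a digraph edge between pkg_a and pkg_b carrying
        # DepPriority(buildtime=True), which forces one merge to wait for
        # the other so the shared builddir is never used by both at once.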
10292         class _pkg_failure(portage.exception.PortageException):
10293                 """
10294                 An instance of this class is raised by unmerge() when
10295                 an uninstallation fails.
10296                 """
10297                 status = 1
10298                 def __init__(self, *pargs):
10299                         portage.exception.PortageException.__init__(self, pargs)
10300                         if pargs:
10301                                 self.status = pargs[0]
10302
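        # Usage sketch (an assumption, for illustration only): unmerge code
        # that hits a failed uninstall is expected to raise this along the
        # lines of
        #
        #   if retval != os.EX_OK:
        #       raise self._pkg_failure(retval)
        #
        # so that the first positional argument becomes the exit status
        # (defaulting to 1 when no arguments are given).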
10303         def _schedule_fetch(self, fetcher):
10304                 """
10305                 Schedule a fetcher on the fetch queue, in order to
10306                 serialize access to the fetch log.
10307                 """
10308                 self._task_queues.fetch.addFront(fetcher)
10309
10310         def _schedule_setup(self, setup_phase):
10311                 """
10312                 Schedule a setup phase on the merge queue, in order to
10313                 serialize unsandboxed access to the live filesystem.
10314                 """
10315                 self._task_queues.merge.addFront(setup_phase)
10316                 self._schedule()
10317
10318         def _schedule_unpack(self, unpack_phase):
10319                 """
10320                 Schedule an unpack phase on the unpack queue, in order
10321                 to serialize $DISTDIR access for live ebuilds.
10322                 """
10323                 self._task_queues.unpack.add(unpack_phase)
10324
10325         def _find_blockers(self, new_pkg):
10326                 """
10327                 Returns a callable which should be called only when
10328                 the vdb lock has been acquired.
10329                 """
10330                 def get_blockers():
10331                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10332                 return get_blockers
10333
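        # Usage sketch (illustrative): callers are expected to defer the
        # lookup until the vdb lock is held, roughly
        #
        #   get_blockers = self._find_blockers(new_pkg)
        #   # ... acquire the vdb lock ...
        #   blocker_dblinks = get_blockers()
        #
        # The closure simply calls _find_blockers_with_lock() below with
        # acquire_lock=0, since the caller already holds the lock.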
10334         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10335                 if self._opts_ignore_blockers.intersection(self.myopts):
10336                         return None
10337
10338                 # Call gc.collect() here to avoid heap overflow that
10339                 # triggers 'Cannot allocate memory' errors (reported
10340                 # with python-2.5).
10341                 import gc
10342                 gc.collect()
10343
10344                 blocker_db = self._blocker_db[new_pkg.root]
10345
10346                 blocker_dblinks = []
10347                 for blocking_pkg in blocker_db.findInstalledBlockers(
10348                         new_pkg, acquire_lock=acquire_lock):
10349                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10350                                 continue
10351                         if new_pkg.cpv == blocking_pkg.cpv:
10352                                 continue
10353                         blocker_dblinks.append(portage.dblink(
10354                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10355                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10356                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10357
10358                 gc.collect()
10359
10360                 return blocker_dblinks
10361
10362         def _dblink_pkg(self, pkg_dblink):
10363                 cpv = pkg_dblink.mycpv
10364                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10365                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10366                 installed = type_name == "installed"
10367                 return self._pkg(cpv, type_name, root_config, installed=installed)
10368
10369         def _append_to_log_path(self, log_path, msg):
10370                 f = open(log_path, 'a')
10371                 try:
10372                         f.write(msg)
10373                 finally:
10374                         f.close()
10375
10376         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10377
10378                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10379                 log_file = None
10380                 out = sys.stdout
10381                 background = self._background
10382
10383                 if background and log_path is not None:
10384                         log_file = open(log_path, 'a')
10385                         out = log_file
10386
10387                 try:
10388                         for msg in msgs:
10389                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10390                 finally:
10391                         if log_file is not None:
10392                                 log_file.close()
10393
10394         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10395                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10396                 background = self._background
10397
10398                 if log_path is None:
10399                         if not (background and level < logging.WARN):
10400                                 portage.util.writemsg_level(msg,
10401                                         level=level, noiselevel=noiselevel)
10402                 else:
10403                         if not background:
10404                                 portage.util.writemsg_level(msg,
10405                                         level=level, noiselevel=noiselevel)
10406                         self._append_to_log_path(log_path, msg)
10407
10408         def _dblink_ebuild_phase(self,
10409                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10410                 """
10411                 Using this callback for merge phases allows the scheduler
10412                 to run while these phases execute asynchronously, and allows
10413                 the scheduler control output handling.
10414                 the scheduler to control output handling.
10415
10416                 scheduler = self._sched_iface
10417                 settings = pkg_dblink.settings
10418                 pkg = self._dblink_pkg(pkg_dblink)
10419                 background = self._background
10420                 log_path = settings.get("PORTAGE_LOG_FILE")
10421
10422                 ebuild_phase = EbuildPhase(background=background,
10423                         pkg=pkg, phase=phase, scheduler=scheduler,
10424                         settings=settings, tree=pkg_dblink.treetype)
10425                 ebuild_phase.start()
10426                 ebuild_phase.wait()
10427
10428                 return ebuild_phase.returncode
10429
10430         def _generate_digests(self):
10431                 """
10432                 Generate digests if necessary for --digest or FEATURES=digest.
10433                 In order to avoid interference, this must be done before parallel
10434                 tasks are started.
10435                 """
10436
10437                 if '--fetchonly' in self.myopts:
10438                         return os.EX_OK
10439
10440                 digest = '--digest' in self.myopts
10441                 if not digest:
10442                         for pkgsettings in self.pkgsettings.itervalues():
10443                                 if 'digest' in pkgsettings.features:
10444                                         digest = True
10445                                         break
10446
10447                 if not digest:
10448                         return os.EX_OK
10449
10450                 for x in self._mergelist:
10451                         if not isinstance(x, Package) or \
10452                                 x.type_name != 'ebuild' or \
10453                                 x.operation != 'merge':
10454                                 continue
10455                         pkgsettings = self.pkgsettings[x.root]
10456                         if '--digest' not in self.myopts and \
10457                                 'digest' not in pkgsettings.features:
10458                                 continue
10459                         portdb = x.root_config.trees['porttree'].dbapi
10460                         ebuild_path = portdb.findname(x.cpv)
10461                         if not ebuild_path:
10462                                 writemsg_level(
10463                                         "!!! Could not locate ebuild for '%s'.\n" \
10464                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10465                                 return 1
10466                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10467                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10468                                 writemsg_level(
10469                                         "!!! Unable to generate manifest for '%s'.\n" \
10470                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10471                                 return 1
10472
10473                 return os.EX_OK
10474
10475         def _check_manifests(self):
10476                 # Verify all the manifests now so that the user is notified of failure
10477                 # as soon as possible.
10478                 if "strict" not in self.settings.features or \
10479                         "--fetchonly" in self.myopts or \
10480                         "--fetch-all-uri" in self.myopts:
10481                         return os.EX_OK
10482
10483                 shown_verifying_msg = False
10484                 quiet_settings = {}
10485                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10486                         quiet_config = portage.config(clone=pkgsettings)
10487                         quiet_config["PORTAGE_QUIET"] = "1"
10488                         quiet_config.backup_changes("PORTAGE_QUIET")
10489                         quiet_settings[myroot] = quiet_config
10490                         del quiet_config
10491
10492                 for x in self._mergelist:
10493                         if not isinstance(x, Package) or \
10494                                 x.type_name != "ebuild":
10495                                 continue
10496
10497                         if not shown_verifying_msg:
10498                                 shown_verifying_msg = True
10499                                 self._status_msg("Verifying ebuild manifests")
10500
10501                         root_config = x.root_config
10502                         portdb = root_config.trees["porttree"].dbapi
10503                         quiet_config = quiet_settings[root_config.root]
10504                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10505                         if not portage.digestcheck([], quiet_config, strict=True):
10506                                 return 1
10507
10508                 return os.EX_OK
10509
10510         def _add_prefetchers(self):
10511
10512                 if not self._parallel_fetch:
10513                         return
10514
10515                 if self._parallel_fetch:
10516                         self._status_msg("Starting parallel fetch")
10517
10518                         prefetchers = self._prefetchers
10519                         getbinpkg = "--getbinpkg" in self.myopts
10520
10521                         # In order to avoid "waiting for lock" messages
10522                         # at the beginning, which annoy users, never
10523                         # spawn a prefetcher for the first package.
10524                         for pkg in self._mergelist[1:]:
10525                                 prefetcher = self._create_prefetcher(pkg)
10526                                 if prefetcher is not None:
10527                                         self._task_queues.fetch.add(prefetcher)
10528                                         prefetchers[pkg] = prefetcher
10529
10530         def _create_prefetcher(self, pkg):
10531                 """
10532                 @return: a prefetcher, or None if not applicable
10533                 """
10534                 prefetcher = None
10535
10536                 if not isinstance(pkg, Package):
10537                         pass
10538
10539                 elif pkg.type_name == "ebuild":
10540
10541                         prefetcher = EbuildFetcher(background=True,
10542                                 config_pool=self._ConfigPool(pkg.root,
10543                                 self._allocate_config, self._deallocate_config),
10544                                 fetchonly=1, logfile=self._fetch_log,
10545                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10546
10547                 elif pkg.type_name == "binary" and \
10548                         "--getbinpkg" in self.myopts and \
10549                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10550
10551                         prefetcher = BinpkgPrefetcher(background=True,
10552                                 pkg=pkg, scheduler=self._sched_iface)
10553
10554                 return prefetcher
10555
10556         def _is_restart_scheduled(self):
10557                 """
10558                 Check if the merge list contains a replacement
10559                 for the currently running instance that will result
10560                 in a restart after merge.
10561                 @rtype: bool
10562                 @returns: True if a restart is scheduled, False otherwise.
10563                 """
10564                 if self._opts_no_restart.intersection(self.myopts):
10565                         return False
10566
10567                 mergelist = self._mergelist
10568
10569                 for i, pkg in enumerate(mergelist):
10570                         if self._is_restart_necessary(pkg) and \
10571                                 i != len(mergelist) - 1:
10572                                 return True
10573
10574                 return False
10575
10576         def _is_restart_necessary(self, pkg):
10577                 """
10578                 @return: True if merging the given package
10579                         requires restart, False otherwise.
10580                 """
10581
10582                 # Figure out if we need a restart.
10583                 if pkg.root == self._running_root.root and \
10584                         portage.match_from_list(
10585                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10586                         if self._running_portage:
10587                                 return pkg.cpv != self._running_portage.cpv
10588                         return True
10589                 return False
10590
10591         def _restart_if_necessary(self, pkg):
10592                 """
10593                 Use execv() to restart emerge. This happens
10594                 if portage upgrades itself and there are
10595                 remaining packages in the list.
10596                 """
10597
10598                 if self._opts_no_restart.intersection(self.myopts):
10599                         return
10600
10601                 if not self._is_restart_necessary(pkg):
10602                         return
10603
10604                 if pkg == self._mergelist[-1]:
10605                         return
10606
10607                 self._main_loop_cleanup()
10608
10609                 logger = self._logger
10610                 pkg_count = self._pkg_count
10611                 mtimedb = self._mtimedb
10612                 bad_resume_opts = self._bad_resume_opts
10613
10614                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10615                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10616
10617                 logger.log(" *** RESTARTING " + \
10618                         "emerge via exec() after change of " + \
10619                         "portage version.")
10620
10621                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10622                 mtimedb.commit()
10623                 portage.run_exitfuncs()
10624                 mynewargv = [sys.argv[0], "--resume"]
10625                 resume_opts = self.myopts.copy()
10626                 # For automatic resume, we need to prevent
10627                 # any of bad_resume_opts from leaking in
10628                 # via EMERGE_DEFAULT_OPTS.
10629                 resume_opts["--ignore-default-opts"] = True
10630                 for myopt, myarg in resume_opts.iteritems():
10631                         if myopt not in bad_resume_opts:
10632                                 if myarg is True:
10633                                         mynewargv.append(myopt)
10634                                 else:
10635                                         mynewargv.append(myopt +"="+ str(myarg))
10636                 # priority only needs to be adjusted on the first run
10637                 os.environ["PORTAGE_NICENESS"] = "0"
10638                 os.execv(mynewargv[0], mynewargv)
10639
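        # Illustrative sketch (assumed option values): if portage just
        # upgraded itself and the original invocation used --jobs=2 and
        # --keep-going, the re-exec above builds an argv roughly like
        #
        #   [sys.argv[0], '--resume', '--ignore-default-opts',
        #    '--jobs=2', '--keep-going']
        #
        # with any option in self._bad_resume_opts filtered out before
        # os.execv() replaces the current process.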
10640         def merge(self):
10641
10642                 if "--resume" in self.myopts:
10643                         # We're resuming.
10644                         portage.writemsg_stdout(
10645                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10646                         self._logger.log(" *** Resuming merge...")
10647
10648                 self._save_resume_list()
10649
10650                 try:
10651                         self._background = self._background_mode()
10652                 except self._unknown_internal_error:
10653                         return 1
10654
10655                 for root in self.trees:
10656                         root_config = self.trees[root]["root_config"]
10657
10658                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10659                         # since pkg_nofetch might be spawned, which requires PORTAGE_BUILDDIR
10660                         # for ensuring a sane $PWD (bug #239560) and for storing elog messages.
10661                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10662                         if not tmpdir or not os.path.isdir(tmpdir):
10663                                 msg = "The directory specified in your " + \
10664                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10665                                         "does not exist. Please create this " + \
10666                                         "directory or correct your PORTAGE_TMPDIR setting."
10667                                 msg = textwrap.wrap(msg, 70)
10668                                 out = portage.output.EOutput()
10669                                 for l in msg:
10670                                         out.eerror(l)
10671                                 return 1
10672
10673                         if self._background:
10674                                 root_config.settings.unlock()
10675                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10676                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10677                                 root_config.settings.lock()
10678
10679                         self.pkgsettings[root] = portage.config(
10680                                 clone=root_config.settings)
10681
10682                 rval = self._generate_digests()
10683                 if rval != os.EX_OK:
10684                         return rval
10685
10686                 rval = self._check_manifests()
10687                 if rval != os.EX_OK:
10688                         return rval
10689
10690                 keep_going = "--keep-going" in self.myopts
10691                 fetchonly = self._build_opts.fetchonly
10692                 mtimedb = self._mtimedb
10693                 failed_pkgs = self._failed_pkgs
10694
10695                 while True:
10696                         rval = self._merge()
10697                         if rval == os.EX_OK or fetchonly or not keep_going:
10698                                 break
10699                         if "resume" not in mtimedb:
10700                                 break
10701                         mergelist = self._mtimedb["resume"].get("mergelist")
10702                         if not mergelist:
10703                                 break
10704
10705                         if not failed_pkgs:
10706                                 break
10707
10708                         for failed_pkg in failed_pkgs:
10709                                 mergelist.remove(list(failed_pkg.pkg))
10710
10711                         self._failed_pkgs_all.extend(failed_pkgs)
10712                         del failed_pkgs[:]
10713
10714                         if not mergelist:
10715                                 break
10716
10717                         if not self._calc_resume_list():
10718                                 break
10719
10720                         clear_caches(self.trees)
10721                         if not self._mergelist:
10722                                 break
10723
10724                         self._save_resume_list()
10725                         self._pkg_count.curval = 0
10726                         self._pkg_count.maxval = len([x for x in self._mergelist \
10727                                 if isinstance(x, Package) and x.operation == "merge"])
10728                         self._status_display.maxval = self._pkg_count.maxval
10729
10730                 self._logger.log(" *** Finished. Cleaning up...")
10731
10732                 if failed_pkgs:
10733                         self._failed_pkgs_all.extend(failed_pkgs)
10734                         del failed_pkgs[:]
10735
10736                 background = self._background
10737                 failure_log_shown = False
10738                 if background and len(self._failed_pkgs_all) == 1:
10739                         # If only one package failed then just show its
10740                         # whole log for easy viewing.
10741                         failed_pkg = self._failed_pkgs_all[-1]
10742                         build_dir = failed_pkg.build_dir
10743                         log_file = None
10744
10745                         log_paths = [failed_pkg.build_log]
10746
10747                         log_path = self._locate_failure_log(failed_pkg)
10748                         if log_path is not None:
10749                                 try:
10750                                         log_file = open(log_path)
10751                                 except IOError:
10752                                         pass
10753
10754                         if log_file is not None:
10755                                 try:
10756                                         for line in log_file:
10757                                                 writemsg_level(line, noiselevel=-1)
10758                                 finally:
10759                                         log_file.close()
10760                                 failure_log_shown = True
10761
10762                 # Dump mod_echo output now since it tends to flood the terminal.
10763                 # This prevents more important output, generated later, from being
10764                 # swept away by the mod_echo output.
10765                 mod_echo_output = _flush_elog_mod_echo()
10766
10767                 if background and not failure_log_shown and \
10768                         self._failed_pkgs_all and \
10769                         self._failed_pkgs_die_msgs and \
10770                         not mod_echo_output:
10771
10772                         printer = portage.output.EOutput()
10773                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10774                                 root_msg = ""
10775                                 if mysettings["ROOT"] != "/":
10776                                         root_msg = " merged to %s" % mysettings["ROOT"]
10777                                 print
10778                                 printer.einfo("Error messages for package %s%s:" % \
10779                                         (colorize("INFORM", key), root_msg))
10780                                 print
10781                                 for phase in portage.const.EBUILD_PHASES:
10782                                         if phase not in logentries:
10783                                                 continue
10784                                         for msgtype, msgcontent in logentries[phase]:
10785                                                 if isinstance(msgcontent, basestring):
10786                                                         msgcontent = [msgcontent]
10787                                                 for line in msgcontent:
10788                                                         printer.eerror(line.strip("\n"))
10789
10790                 if self._post_mod_echo_msgs:
10791                         for msg in self._post_mod_echo_msgs:
10792                                 msg()
10793
10794                 if len(self._failed_pkgs_all) > 1 or \
10795                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10796                         if len(self._failed_pkgs_all) > 1:
10797                                 msg = "The following %d packages have " % \
10798                                         len(self._failed_pkgs_all) + \
10799                                         "failed to build or install:"
10800                         else:
10801                                 msg = "The following package has " + \
10802                                         "failed to build or install:"
10803                         prefix = bad(" * ")
10804                         writemsg(prefix + "\n", noiselevel=-1)
10805                         from textwrap import wrap
10806                         for line in wrap(msg, 72):
10807                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10808                         writemsg(prefix + "\n", noiselevel=-1)
10809                         for failed_pkg in self._failed_pkgs_all:
10810                                 writemsg("%s\t%s\n" % (prefix,
10811                                         colorize("INFORM", str(failed_pkg.pkg))),
10812                                         noiselevel=-1)
10813                         writemsg(prefix + "\n", noiselevel=-1)
10814
10815                 return rval
10816
10817         def _elog_listener(self, mysettings, key, logentries, fulltext):
10818                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10819                 if errors:
10820                         self._failed_pkgs_die_msgs.append(
10821                                 (mysettings, key, errors))
10822
10823         def _locate_failure_log(self, failed_pkg):
10824
10825                 build_dir = failed_pkg.build_dir
10826                 log_file = None
10827
10828                 log_paths = [failed_pkg.build_log]
10829
10830                 for log_path in log_paths:
10831                         if not log_path:
10832                                 continue
10833
10834                         try:
10835                                 log_size = os.stat(log_path).st_size
10836                         except OSError:
10837                                 continue
10838
10839                         if log_size == 0:
10840                                 continue
10841
10842                         return log_path
10843
10844                 return None
10845
10846         def _add_packages(self):
10847                 pkg_queue = self._pkg_queue
10848                 for pkg in self._mergelist:
10849                         if isinstance(pkg, Package):
10850                                 pkg_queue.append(pkg)
10851                         elif isinstance(pkg, Blocker):
10852                                 pass
10853
10854         def _system_merge_started(self, merge):
10855                 """
10856                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10857                 """
10858                 graph = self._digraph
10859                 if graph is None:
10860                         return
10861                 pkg = merge.merge.pkg
10862
10863                 # Skip this if $ROOT != / since it shouldn't matter if there
10864                 # are unsatisfied system runtime deps in this case.
10865                 if pkg.root != '/':
10866                         return
10867
10868                 completed_tasks = self._completed_tasks
10869                 unsatisfied = self._unsatisfied_system_deps
10870
10871                 def ignore_non_runtime_or_satisfied(priority):
10872                         """
10873                         Ignore non-runtime and satisfied runtime priorities.
10874                         """
10875                         if isinstance(priority, DepPriority) and \
10876                                 not priority.satisfied and \
10877                                 (priority.runtime or priority.runtime_post):
10878                                 return False
10879                         return True
10880
10881                 # When checking for unsatisfied runtime deps, only check
10882                 # direct deps since indirect deps are checked when the
10883                 # corresponding parent is merged.
10884                 for child in graph.child_nodes(pkg,
10885                         ignore_priority=ignore_non_runtime_or_satisfied):
10886                         if not isinstance(child, Package) or \
10887                                 child.operation == 'uninstall':
10888                                 continue
10889                         if child is pkg:
10890                                 continue
10891                         if child.operation == 'merge' and \
10892                                 child not in completed_tasks:
10893                                 unsatisfied.add(child)
10894
10895         def _merge_wait_exit_handler(self, task):
10896                 self._merge_wait_scheduled.remove(task)
10897                 self._merge_exit(task)
10898
10899         def _merge_exit(self, merge):
10900                 self._do_merge_exit(merge)
10901                 self._deallocate_config(merge.merge.settings)
10902                 if merge.returncode == os.EX_OK and \
10903                         not merge.merge.pkg.installed:
10904                         self._status_display.curval += 1
10905                 self._status_display.merges = len(self._task_queues.merge)
10906                 self._schedule()
10907
10908         def _do_merge_exit(self, merge):
10909                 pkg = merge.merge.pkg
10910                 if merge.returncode != os.EX_OK:
10911                         settings = merge.merge.settings
10912                         build_dir = settings.get("PORTAGE_BUILDDIR")
10913                         build_log = settings.get("PORTAGE_LOG_FILE")
10914
10915                         self._failed_pkgs.append(self._failed_pkg(
10916                                 build_dir=build_dir, build_log=build_log,
10917                                 pkg=pkg,
10918                                 returncode=merge.returncode))
10919                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10920
10921                         self._status_display.failed = len(self._failed_pkgs)
10922                         return
10923
10924                 self._task_complete(pkg)
10925                 pkg_to_replace = merge.merge.pkg_to_replace
10926                 if pkg_to_replace is not None:
10927                         # When a package is replaced, mark its uninstall
10928                         # task complete (if any).
10929                         uninst_hash_key = \
10930                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10931                         self._task_complete(uninst_hash_key)
10932
10933                 if pkg.installed:
10934                         return
10935
10936                 self._restart_if_necessary(pkg)
10937
10938                 # Call mtimedb.commit() after each merge so that
10939                 # --resume still works after being interrupted
10940                 # by reboot, sigkill or similar.
10941                 mtimedb = self._mtimedb
10942                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10943                 if not mtimedb["resume"]["mergelist"]:
10944                         del mtimedb["resume"]
10945                 mtimedb.commit()
10946
10947         def _build_exit(self, build):
10948                 if build.returncode == os.EX_OK:
10949                         self.curval += 1
10950                         merge = PackageMerge(merge=build)
10951                         if not build.build_opts.buildpkgonly and \
10952                                 build.pkg in self._deep_system_deps:
10953                                 # Since dependencies on system packages are frequently
10954                                 # unspecified, merge them only when no builds are executing.
10955                                 self._merge_wait_queue.append(merge)
10956                                 merge.addStartListener(self._system_merge_started)
10957                         else:
10958                                 merge.addExitListener(self._merge_exit)
10959                                 self._task_queues.merge.add(merge)
10960                                 self._status_display.merges = len(self._task_queues.merge)
10961                 else:
10962                         settings = build.settings
10963                         build_dir = settings.get("PORTAGE_BUILDDIR")
10964                         build_log = settings.get("PORTAGE_LOG_FILE")
10965
10966                         self._failed_pkgs.append(self._failed_pkg(
10967                                 build_dir=build_dir, build_log=build_log,
10968                                 pkg=build.pkg,
10969                                 returncode=build.returncode))
10970                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10971
10972                         self._status_display.failed = len(self._failed_pkgs)
10973                         self._deallocate_config(build.settings)
10974                 self._jobs -= 1
10975                 self._status_display.running = self._jobs
10976                 self._schedule()
10977
10978         def _extract_exit(self, build):
10979                 self._build_exit(build)
10980
10981         def _task_complete(self, pkg):
10982                 self._completed_tasks.add(pkg)
10983                 self._unsatisfied_system_deps.discard(pkg)
10984                 self._choose_pkg_return_early = False
10985
10986         def _merge(self):
10987
10988                 self._add_prefetchers()
10989                 self._add_packages()
10990                 pkg_queue = self._pkg_queue
10991                 failed_pkgs = self._failed_pkgs
10992                 portage.locks._quiet = self._background
10993                 portage.elog._emerge_elog_listener = self._elog_listener
10994                 rval = os.EX_OK
10995
10996                 try:
10997                         self._main_loop()
10998                 finally:
10999                         self._main_loop_cleanup()
11000                         portage.locks._quiet = False
11001                         portage.elog._emerge_elog_listener = None
11002                         if failed_pkgs:
11003                                 rval = failed_pkgs[-1].returncode
11004
11005                 return rval
11006
11007         def _main_loop_cleanup(self):
11008                 del self._pkg_queue[:]
11009                 self._completed_tasks.clear()
11010                 self._deep_system_deps.clear()
11011                 self._unsatisfied_system_deps.clear()
11012                 self._choose_pkg_return_early = False
11013                 self._status_display.reset()
11014                 self._digraph = None
11015                 self._task_queues.fetch.clear()
11016
11017         def _choose_pkg(self):
11018                 """
11019                 Choose a task that has all of its dependencies satisfied.
11020                 """
11021
11022                 if self._choose_pkg_return_early:
11023                         return None
11024
11025                 if self._digraph is None:
11026                         if (self._jobs or self._task_queues.merge) and \
11027                                 not ("--nodeps" in self.myopts and \
11028                                 (self._max_jobs is True or self._max_jobs > 1)):
11029                                 self._choose_pkg_return_early = True
11030                                 return None
11031                         return self._pkg_queue.pop(0)
11032
11033                 if not (self._jobs or self._task_queues.merge):
11034                         return self._pkg_queue.pop(0)
11035
11036                 self._prune_digraph()
11037
11038                 chosen_pkg = None
11039                 later = set(self._pkg_queue)
11040                 for pkg in self._pkg_queue:
11041                         later.remove(pkg)
11042                         if not self._dependent_on_scheduled_merges(pkg, later):
11043                                 chosen_pkg = pkg
11044                                 break
11045
11046                 if chosen_pkg is not None:
11047                         self._pkg_queue.remove(chosen_pkg)
11048
11049                 if chosen_pkg is None:
11050                         # There's no point in searching for a package to
11051                         # choose until at least one of the existing jobs
11052                         # completes.
11053                         self._choose_pkg_return_early = True
11054
11055                 return chosen_pkg
11056
11057         def _dependent_on_scheduled_merges(self, pkg, later):
11058                 """
11059                 Traverse the subgraph of the given package's deep dependencies
11060                 to see if it contains any scheduled merges.
11061                 @param pkg: a package to check dependencies for
11062                 @type pkg: Package
11063                 @param later: packages for which dependence should be ignored
11064                         since they will be merged later than pkg anyway and therefore
11065                         delaying the merge of pkg will not result in a more optimal
11066                         merge order
11067                 @type later: set
11068                 @rtype: bool
11069                 @returns: True if the package is dependent, False otherwise.
11070                 """
11071
11072                 graph = self._digraph
11073                 completed_tasks = self._completed_tasks
11074
11075                 dependent = False
11076                 traversed_nodes = set([pkg])
11077                 direct_deps = graph.child_nodes(pkg)
11078                 node_stack = direct_deps
11079                 direct_deps = frozenset(direct_deps)
11080                 while node_stack:
11081                         node = node_stack.pop()
11082                         if node in traversed_nodes:
11083                                 continue
11084                         traversed_nodes.add(node)
11085                         if not ((node.installed and node.operation == "nomerge") or \
11086                                 (node.operation == "uninstall" and \
11087                                 node not in direct_deps) or \
11088                                 node in completed_tasks or \
11089                                 node in later):
11090                                 dependent = True
11091                                 break
11092                         node_stack.extend(graph.child_nodes(node))
11093
11094                 return dependent
11095
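        # Worked example (illustrative, hypothetical package names): if pkg
        # has a child node dev-libs/libfoo in the digraph whose operation is
        # "merge" and which is neither in completed_tasks nor in 'later',
        # none of the ignore conditions above match, so the method returns
        # True and _choose_pkg() defers pkg until libfoo has been merged.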
11096         def _allocate_config(self, root):
11097                 """
11098                 Allocate a unique config instance for a task in order
11099                 to prevent interference between parallel tasks.
11100                 """
11101                 if self._config_pool[root]:
11102                         temp_settings = self._config_pool[root].pop()
11103                 else:
11104                         temp_settings = portage.config(clone=self.pkgsettings[root])
11105                 # Since config.setcpv() isn't guaranteed to call config.reset() for
11106                 # performance reasons, call it here to make sure all settings from the
11107                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11108                 temp_settings.reload()
11109                 temp_settings.reset()
11110                 return temp_settings
11111
11112         def _deallocate_config(self, settings):
11113                 self._config_pool[settings["ROOT"]].append(settings)
11114
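        # Illustrative sketch of the pool round trip (not part of the
        # original code): each parallel task borrows a settings instance and
        # returns it when finished, roughly
        #
        #   settings = self._allocate_config(pkg.root)
        #   try:
        #       pass  # run the task with its own config instance
        #   finally:
        #       self._deallocate_config(settings)
        #
        # which keeps parallel jobs from mutating a shared portage.config.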
11115         def _main_loop(self):
11116
11117                 # Allow at most one job if a restart is scheduled
11118                 # due to a portage update.
11119                 if self._is_restart_scheduled() or \
11120                         self._opts_no_background.intersection(self.myopts):
11121                         self._set_max_jobs(1)
11122
11123                 merge_queue = self._task_queues.merge
11124
11125                 while self._schedule():
11126                         if self._poll_event_handlers:
11127                                 self._poll_loop()
11128
11129                 while True:
11130                         self._schedule()
11131                         if not (self._jobs or merge_queue):
11132                                 break
11133                         if self._poll_event_handlers:
11134                                 self._poll_loop()
11135
11136         def _keep_scheduling(self):
11137                 return bool(self._pkg_queue and \
11138                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11139
11140         def _schedule_tasks(self):
11141
11142                 # When the number of jobs drops to zero, process all waiting merges.
11143                 if not self._jobs and self._merge_wait_queue:
11144                         for task in self._merge_wait_queue:
11145                                 task.addExitListener(self._merge_wait_exit_handler)
11146                                 self._task_queues.merge.add(task)
11147                         self._status_display.merges = len(self._task_queues.merge)
11148                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11149                         del self._merge_wait_queue[:]
11150
11151                 self._schedule_tasks_imp()
11152                 self._status_display.display()
11153
11154                 state_change = 0
11155                 for q in self._task_queues.values():
11156                         if q.schedule():
11157                                 state_change += 1
11158
11159                 # Cancel prefetchers if they're the only reason
11160                 # the main poll loop is still running.
11161                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11162                         not (self._jobs or self._task_queues.merge) and \
11163                         self._task_queues.fetch:
11164                         self._task_queues.fetch.clear()
11165                         state_change += 1
11166
11167                 if state_change:
11168                         self._schedule_tasks_imp()
11169                         self._status_display.display()
11170
11171                 return self._keep_scheduling()
11172
11173         def _job_delay(self):
11174                 """
11175                 @rtype: bool
11176                 @returns: True if job scheduling should be delayed, False otherwise.
11177                 """
11178
11179                 if self._jobs and self._max_load is not None:
11180
11181                         current_time = time.time()
11182
11183                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11184                         if delay > self._job_delay_max:
11185                                 delay = self._job_delay_max
11186                         if (current_time - self._previous_job_start_time) < delay:
11187                                 return True
11188
11189                 return False
11190
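        # Worked example (illustrative, with made-up parameter values): with
        # _job_delay_factor == 0.1, _job_delay_exp == 1.5 and four running
        # jobs, the delay is 0.1 * 4 ** 1.5 == 0.8 seconds (capped at
        # _job_delay_max), so a new job only starts once at least 0.8
        # seconds have passed since the previous job was started.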
11191         def _schedule_tasks_imp(self):
11192                 """
11193                 @rtype: bool
11194                 @returns: True if state changed, False otherwise.
11195                 """
11196
11197                 state_change = 0
11198
11199                 while True:
11200
11201                         if not self._keep_scheduling():
11202                                 return bool(state_change)
11203
11204                         if self._choose_pkg_return_early or \
11205                                 self._merge_wait_scheduled or \
11206                                 (self._jobs and self._unsatisfied_system_deps) or \
11207                                 not self._can_add_job() or \
11208                                 self._job_delay():
11209                                 return bool(state_change)
11210
11211                         pkg = self._choose_pkg()
11212                         if pkg is None:
11213                                 return bool(state_change)
11214
11215                         state_change += 1
11216
11217                         if not pkg.installed:
11218                                 self._pkg_count.curval += 1
11219
11220                         task = self._task(pkg)
11221
11222                         if pkg.installed:
11223                                 merge = PackageMerge(merge=task)
11224                                 merge.addExitListener(self._merge_exit)
11225                                 self._task_queues.merge.add(merge)
11226
11227                         elif pkg.built:
11228                                 self._jobs += 1
11229                                 self._previous_job_start_time = time.time()
11230                                 self._status_display.running = self._jobs
11231                                 task.addExitListener(self._extract_exit)
11232                                 self._task_queues.jobs.add(task)
11233
11234                         else:
11235                                 self._jobs += 1
11236                                 self._previous_job_start_time = time.time()
11237                                 self._status_display.running = self._jobs
11238                                 task.addExitListener(self._build_exit)
11239                                 self._task_queues.jobs.add(task)
11240
11241                 return bool(state_change)
11242
11243         def _task(self, pkg):
11244
11245                 pkg_to_replace = None
11246                 if pkg.operation != "uninstall":
11247                         vardb = pkg.root_config.trees["vartree"].dbapi
11248                         previous_cpv = vardb.match(pkg.slot_atom)
11249                         if previous_cpv:
11250                                 previous_cpv = previous_cpv.pop()
11251                                 pkg_to_replace = self._pkg(previous_cpv,
11252                                         "installed", pkg.root_config, installed=True)
11253
11254                 task = MergeListItem(args_set=self._args_set,
11255                         background=self._background, binpkg_opts=self._binpkg_opts,
11256                         build_opts=self._build_opts,
11257                         config_pool=self._ConfigPool(pkg.root,
11258                         self._allocate_config, self._deallocate_config),
11259                         emerge_opts=self.myopts,
11260                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11261                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11262                         pkg_to_replace=pkg_to_replace,
11263                         prefetcher=self._prefetchers.get(pkg),
11264                         scheduler=self._sched_iface,
11265                         settings=self._allocate_config(pkg.root),
11266                         statusMessage=self._status_msg,
11267                         world_atom=self._world_atom)
11268
11269                 return task
11270
11271         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11272                 pkg = failed_pkg.pkg
11273                 msg = "%s to %s %s" % \
11274                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11275                 if pkg.root != "/":
11276                         msg += " %s %s" % (preposition, pkg.root)
11277
11278                 log_path = self._locate_failure_log(failed_pkg)
11279                 if log_path is not None:
11280                         msg += ", Log file:"
11281                 self._status_msg(msg)
11282
11283                 if log_path is not None:
11284                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11285
11286         def _status_msg(self, msg):
11287                 """
11288                 Display a brief status message (no newlines) in the status display.
11289                 This is called by tasks to provide feedback to the user. This
11290                 delegates the responsibility of generating \\r and \\n control characters
11291                 to the status display, so that lines are created or erased when
11292                 necessary and appropriate.
11293
11294                 @type msg: str
11295                 @param msg: a brief status message (no newlines allowed)
11296                 """
11297                 if not self._background:
11298                         writemsg_level("\n")
11299                 self._status_display.displayMessage(msg)
11300
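        # Usage example (taken from this class): _check_manifests() reports
        # progress with
        #
        #   self._status_msg("Verifying ebuild manifests")
        #
        # Callers pass a single line; the status display takes care of the
        # control characters needed to create or erase lines.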
11301         def _save_resume_list(self):
11302                 """
11303                 Do this before verifying the ebuild Manifests since it might
11304                 be possible for the user to use --resume --skipfirst to get past
11305                 a non-essential package with a broken digest.
11306                 """
11307                 mtimedb = self._mtimedb
11308                 mtimedb["resume"]["mergelist"] = [list(x) \
11309                         for x in self._mergelist \
11310                         if isinstance(x, Package) and x.operation == "merge"]
11311
11312                 mtimedb.commit()
11313
11314         def _calc_resume_list(self):
11315                 """
11316                 Use the current resume list to calculate a new one,
11317                 dropping any packages with unsatisfied deps.
11318                 @rtype: bool
11319                 @returns: True if successful, False otherwise.
11320                 """
11321                 print colorize("GOOD", "*** Resuming merge...")
11322
11323                 if self._show_list():
11324                         if "--tree" in self.myopts:
11325                                 portage.writemsg_stdout("\n" + \
11326                                         darkgreen("These are the packages that " + \
11327                                         "would be merged, in reverse order:\n\n"))
11328
11329                         else:
11330                                 portage.writemsg_stdout("\n" + \
11331                                         darkgreen("These are the packages that " + \
11332                                         "would be merged, in order:\n\n"))
11333
11334                 show_spinner = "--quiet" not in self.myopts and \
11335                         "--nodeps" not in self.myopts
11336
11337                 if show_spinner:
11338                         print "Calculating dependencies  ",
11339
11340                 myparams = create_depgraph_params(self.myopts, None)
11341                 success = False
11342                 e = None
11343                 try:
11344                         success, mydepgraph, dropped_tasks = resume_depgraph(
11345                                 self.settings, self.trees, self._mtimedb, self.myopts,
11346                                 myparams, self._spinner)
11347                 except depgraph.UnsatisfiedResumeDep, exc:
11348                         # rename variable to avoid python-3.0 error:
11349                         # SyntaxError: can not delete variable 'e' referenced in nested
11350                         #              scope
11351                         e = exc
11352                         mydepgraph = e.depgraph
11353                         dropped_tasks = set()
11354
11355                 if show_spinner:
11356                         print "\b\b... done!"
11357
11358                 if e is not None:
11359                         def unsatisfied_resume_dep_msg():
11360                                 mydepgraph.display_problems()
11361                                 out = portage.output.EOutput()
11362                                 out.eerror("One or more packages are either masked or " + \
11363                                         "have missing dependencies:")
11364                                 out.eerror("")
11365                                 indent = "  "
11366                                 show_parents = set()
11367                                 for dep in e.value:
11368                                         if dep.parent in show_parents:
11369                                                 continue
11370                                         show_parents.add(dep.parent)
11371                                         if dep.atom is None:
11372                                                 out.eerror(indent + "Masked package:")
11373                                                 out.eerror(2 * indent + str(dep.parent))
11374                                                 out.eerror("")
11375                                         else:
11376                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11377                                                 out.eerror(2 * indent + str(dep.parent))
11378                                                 out.eerror("")
11379                                 msg = "The resume list contains packages " + \
11380                                         "that are either masked or have " + \
11381                                         "unsatisfied dependencies. " + \
11382                                         "Please restart/continue " + \
11383                                         "the operation manually, or use --skipfirst " + \
11384                                         "to skip the first package in the list and " + \
11385                                         "any other packages that may be " + \
11386                                         "masked or have missing dependencies."
11387                                 for line in textwrap.wrap(msg, 72):
11388                                         out.eerror(line)
11389                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11390                         return False
11391
11392                 if success and self._show_list():
11393                         mylist = mydepgraph.altlist()
11394                         if mylist:
11395                                 if "--tree" in self.myopts:
11396                                         mylist.reverse()
11397                                 mydepgraph.display(mylist, favorites=self._favorites)
11398
11399                 if not success:
11400                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11401                         return False
11402                 mydepgraph.display_problems()
11403
11404                 mylist = mydepgraph.altlist()
11405                 mydepgraph.break_refs(mylist)
11406                 mydepgraph.break_refs(dropped_tasks)
11407                 self._mergelist = mylist
11408                 self._set_digraph(mydepgraph.schedulerGraph())
11409
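                      # Report each merge task that was dropped from the resume list due to an
                      # unsatisfied dependency: log it through elog (phase "other") so the
                      # failure is visible in the final summary, and record it as a failed
                      # package.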
11410                 msg_width = 75
11411                 for task in dropped_tasks:
11412                         if not (isinstance(task, Package) and task.operation == "merge"):
11413                                 continue
11414                         pkg = task
11415                         msg = "emerge --keep-going:" + \
11416                                 " %s" % (pkg.cpv,)
11417                         if pkg.root != "/":
11418                                 msg += " for %s" % (pkg.root,)
11419                         msg += " dropped due to unsatisfied dependency."
11420                         for line in textwrap.wrap(msg, msg_width):
11421                                 eerror(line, phase="other", key=pkg.cpv)
11422                         settings = self.pkgsettings[pkg.root]
11423                         # Ensure that log collection from $T is disabled inside
11424                         # elog_process(), since any logs that might exist are
11425                         # not valid here.
11426                         settings.pop("T", None)
11427                         portage.elog.elog_process(pkg.cpv, settings)
11428                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11429
11430                 return True
11431
11432         def _show_list(self):
11433                 myopts = self.myopts
11434                 if "--quiet" not in myopts and \
11435                         ("--ask" in myopts or "--tree" in myopts or \
11436                         "--verbose" in myopts):
11437                         return True
11438                 return False
11439
11440         def _world_atom(self, pkg):
11441                 """
11442                 Add the package to the world file, but only if
11443                 it's supposed to be added. Otherwise, do nothing.
11444                 """
11445
11446                 if set(("--buildpkgonly", "--fetchonly",
11447                         "--fetch-all-uri",
11448                         "--oneshot", "--onlydeps",
11449                         "--pretend")).intersection(self.myopts):
11450                         return
11451
11452                 if pkg.root != self.target_root:
11453                         return
11454
11455                 args_set = self._args_set
11456                 if not args_set.findAtomForPackage(pkg):
11457                         return
11458
11459                 logger = self._logger
11460                 pkg_count = self._pkg_count
11461                 root_config = pkg.root_config
11462                 world_set = root_config.sets["world"]
11463                 world_locked = False
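                      # Lock the world set (when the backing store supports locking) and
                      # re-load it inside the lock, so concurrent emerge processes don't
                      # overwrite each other's additions.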
11464                 if hasattr(world_set, "lock"):
11465                         world_set.lock()
11466                         world_locked = True
11467
11468                 try:
11469                         if hasattr(world_set, "load"):
11470                                 world_set.load() # maybe it's changed on disk
11471
11472                         atom = create_world_atom(pkg, args_set, root_config)
11473                         if atom:
11474                                 if hasattr(world_set, "add"):
11475                                         self._status_msg(('Recording %s in "world" ' + \
11476                                                 'favorites file...') % atom)
11477                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11478                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11479                                         world_set.add(atom)
11480                                 else:
11481                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11482                                                 (atom,), level=logging.WARN, noiselevel=-1)
11483                 finally:
11484                         if world_locked:
11485                                 world_set.unlock()
11486
11487         def _pkg(self, cpv, type_name, root_config, installed=False):
11488                 """
11489                 Get a package instance from the cache, or create a new
11490                 one if necessary. Raises KeyError from aux_get if it
11491                 fails for some reason (package does not exist or is
11492                 corrupt).
11493                 """
11494                 operation = "merge"
11495                 if installed:
11496                         operation = "nomerge"
11497
11498                 if self._digraph is not None:
11499                         # Reuse existing instance when available.
11500                         pkg = self._digraph.get(
11501                                 (type_name, root_config.root, cpv, operation))
11502                         if pkg is not None:
11503                                 return pkg
11504
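                      # Not found in the digraph, so build a fresh Package instance, fetching
                      # only the metadata keys cached by the corresponding dbapi.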
11505                 tree_type = depgraph.pkg_tree_map[type_name]
11506                 db = root_config.trees[tree_type].dbapi
11507                 db_keys = list(self.trees[root_config.root][
11508                         tree_type].dbapi._aux_cache_keys)
11509                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11510                 pkg = Package(cpv=cpv, metadata=metadata,
11511                         root_config=root_config, installed=installed)
11512                 if type_name == "ebuild":
11513                         settings = self.pkgsettings[root_config.root]
11514                         settings.setcpv(pkg)
11515                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11516                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11517
11518                 return pkg
11519
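      # MetadataRegen asynchronously regenerates the ebuild metadata cache (auxdb):
      # it iterates over every ebuild in the configured portage trees, runs up to
      # max_jobs metadata processes in parallel, and finally prunes cache entries
      # whose ebuilds no longer exist. A minimal usage sketch (assuming a configured
      # portdbapi instance named `portdb`):
      #
      #     regen = MetadataRegen(portdb, max_jobs=4)
      #     regen.run()
      #     sys.exit(regen.returncode)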
11520 class MetadataRegen(PollScheduler):
11521
11522         def __init__(self, portdb, max_jobs=None, max_load=None):
11523                 PollScheduler.__init__(self)
11524                 self._portdb = portdb
11525
11526                 if max_jobs is None:
11527                         max_jobs = 1
11528
11529                 self._max_jobs = max_jobs
11530                 self._max_load = max_load
11531                 self._sched_iface = self._sched_iface_class(
11532                         register=self._register,
11533                         schedule=self._schedule_wait,
11534                         unregister=self._unregister)
11535
11536                 self._valid_pkgs = set()
11537                 self._process_iter = self._iter_metadata_processes()
11538                 self.returncode = os.EX_OK
11539                 self._error_count = 0
11540
11541         def _iter_metadata_processes(self):
11542                 portdb = self._portdb
11543                 valid_pkgs = self._valid_pkgs
11544                 every_cp = portdb.cp_all()
11545                 every_cp.sort(reverse=True)
11546
11547                 while every_cp:
11548                         cp = every_cp.pop()
11549                         portage.writemsg_stdout("Processing %s\n" % cp)
11550                         cpv_list = portdb.cp_list(cp)
11551                         for cpv in cpv_list:
11552                                 valid_pkgs.add(cpv)
11553                                 ebuild_path, repo_path = portdb.findname2(cpv)
11554                                 metadata_process = portdb._metadata_process(
11555                                         cpv, ebuild_path, repo_path)
11556                                 if metadata_process is None:
11557                                         continue
11558                                 yield metadata_process
11559
11560         def run(self):
11561
11562                 portdb = self._portdb
11563                 from portage.cache.cache_errors import CacheError
11564                 dead_nodes = {}
11565
11566                 for mytree in portdb.porttrees:
11567                         try:
11568                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11569                         except CacheError, e:
11570                                 portage.writemsg("Error listing cache entries for " + \
11571                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11572                                 del e
11573                                 dead_nodes = None
11574                                 break
11575
11576                 while self._schedule():
11577                         self._poll_loop()
11578
11579                 while self._jobs:
11580                         self._poll_loop()
11581
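                      # Prune stale cache entries: each processed cpv whose ebuild still
                      # exists in a tree is discarded from that tree's dead_nodes set below,
                      # and whatever remains is deleted from the auxdb.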
11582                 if dead_nodes:
11583                         for y in self._valid_pkgs:
11584                                 for mytree in portdb.porttrees:
11585                                         if portdb.findname2(y, mytree=mytree)[0]:
11586                                                 dead_nodes[mytree].discard(y)
11587
11588                         for mytree, nodes in dead_nodes.iteritems():
11589                                 auxdb = portdb.auxdb[mytree]
11590                                 for y in nodes:
11591                                         try:
11592                                                 del auxdb[y]
11593                                         except (KeyError, CacheError):
11594                                                 pass
11595
11596         def _schedule_tasks(self):
11597                 """
11598                 @rtype: bool
11599                 @returns: True if there may be remaining tasks to schedule,
11600                         False otherwise.
11601                 """
11602                 while self._can_add_job():
11603                         try:
11604                                 metadata_process = self._process_iter.next()
11605                         except StopIteration:
11606                                 return False
11607
11608                         self._jobs += 1
11609                         metadata_process.scheduler = self._sched_iface
11610                         metadata_process.addExitListener(self._metadata_exit)
11611                         metadata_process.start()
11612                 return True
11613
11614         def _metadata_exit(self, metadata_process):
11615                 self._jobs -= 1
11616                 if metadata_process.returncode != os.EX_OK:
11617                         self.returncode = 1
11618                         self._error_count += 1
11619                         self._valid_pkgs.discard(metadata_process.cpv)
11620                         portage.writemsg("Error processing %s, continuing...\n" % \
11621                                 (metadata_process.cpv,))
11622                 self._schedule()
11623
11624 class UninstallFailure(portage.exception.PortageException):
11625         """
11626         An instance of this class is raised by unmerge() when
11627         an uninstallation fails.
11628         """
11629         status = 1
11630         def __init__(self, *pargs):
11631                 portage.exception.PortageException.__init__(self, pargs)
11632                 if pargs:
11633                         self.status = pargs[0]
11634
11635 def unmerge(root_config, myopts, unmerge_action,
11636         unmerge_files, ldpath_mtimes, autoclean=0,
11637         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11638         scheduler=None, writemsg_level=portage.util.writemsg_level):
11639
11640         quiet = "--quiet" in myopts
11641         settings = root_config.settings
11642         sets = root_config.sets
11643         vartree = root_config.trees["vartree"]
11644         candidate_catpkgs=[]
11645         global_unmerge=0
11646         xterm_titles = "notitles" not in settings.features
11647         out = portage.output.EOutput()
11648         pkg_cache = {}
11649         db_keys = list(vartree.dbapi._aux_cache_keys)
11650
11651         def _pkg(cpv):
11652                 pkg = pkg_cache.get(cpv)
11653                 if pkg is None:
11654                         pkg = Package(cpv=cpv, installed=True,
11655                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11656                                 root_config=root_config,
11657                                 type_name="installed")
11658                         pkg_cache[cpv] = pkg
11659                 return pkg
11660
11661         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11662         try:
11663                 # At least the parent needs to exist for the lock file.
11664                 portage.util.ensure_dirs(vdb_path)
11665         except portage.exception.PortageException:
11666                 pass
11667         vdb_lock = None
11668         try:
11669                 if os.access(vdb_path, os.W_OK):
11670                         vdb_lock = portage.locks.lockdir(vdb_path)
11671                 realsyslist = sets["system"].getAtoms()
11672                 syslist = []
11673                 for x in realsyslist:
11674                         mycp = portage.dep_getkey(x)
11675                         if mycp in settings.getvirtuals():
11676                                 providers = []
11677                                 for provider in settings.getvirtuals()[mycp]:
11678                                         if vartree.dbapi.match(provider):
11679                                                 providers.append(provider)
11680                                 if len(providers) == 1:
11681                                         syslist.extend(providers)
11682                         else:
11683                                 syslist.append(mycp)
11684         
11685                 mysettings = portage.config(clone=settings)
11686         
11687                 if not unmerge_files:
11688                         if unmerge_action == "unmerge":
11689                                 print
11690                                 print bold("emerge unmerge") + " can only be used with specific package names"
11691                                 print
11692                                 return 0
11693                         else:
11694                                 global_unmerge = 1
11695         
11696                 localtree = vartree
11697                 # process all arguments and add all
11698                 # valid db entries to candidate_catpkgs
11699                 if global_unmerge:
11700                         if not unmerge_files:
11701                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11702                 else:
11703                         #we've got command-line arguments
11704                         if not unmerge_files:
11705                                 print "\nNo packages to unmerge have been provided.\n"
11706                                 return 0
11707                         for x in unmerge_files:
11708                                 arg_parts = x.split('/')
11709                                 if x[0] not in [".","/"] and \
11710                                         arg_parts[-1][-7:] != ".ebuild":
11711                                         #possible cat/pkg or dep; treat as such
11712                                         candidate_catpkgs.append(x)
11713                                 elif unmerge_action in ["prune","clean"]:
11714                                         print "\n!!! Prune and clean do not accept individual" + \
11715                                                 " ebuilds as arguments;\n    skipping.\n"
11716                                         continue
11717                                 else:
11718                                         # it appears that the user is specifying an installed
11719                                         # ebuild and we're in "unmerge" mode, so it's ok.
11720                                         if not os.path.exists(x):
11721                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11722                                                 return 0
11723         
11724                                         absx   = os.path.abspath(x)
11725                                         sp_absx = absx.split("/")
11726                                         if sp_absx[-1][-7:] == ".ebuild":
11727                                                 del sp_absx[-1]
11728                                                 absx = "/".join(sp_absx)
11729         
11730                                         sp_absx_len = len(sp_absx)
11731         
11732                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11733                                         vdb_len  = len(vdb_path)
11734         
11735                                         sp_vdb     = vdb_path.split("/")
11736                                         sp_vdb_len = len(sp_vdb)
11737         
11738                                         if not os.path.exists(absx+"/CONTENTS"):
11739                                                 print "!!! Not a valid db dir: "+str(absx)
11740                                                 return 0
11741         
11742                                         if sp_absx_len <= sp_vdb_len:
11743                                                 # The path is shorter, so it can't be inside the vdb.
11744                                                 print sp_absx
11745                                                 print absx
11746                                                 print "\n!!!",x,"cannot be inside "+ \
11747                                                         vdb_path+"; aborting.\n"
11748                                                 return 0
11749         
11750                                         for idx in range(0,sp_vdb_len):
11751                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11752                                                         print sp_absx
11753                                                         print absx
11754                                                         print "\n!!!", x, "is not inside "+\
11755                                                                 vdb_path+"; aborting.\n"
11756                                                         return 0
11757         
11758                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11759                                         candidate_catpkgs.append(
11760                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11761         
11762                 newline=""
11763                 if (not "--quiet" in myopts):
11764                         newline="\n"
11765                 if settings["ROOT"] != "/":
11766                         writemsg_level(darkgreen(newline+ \
11767                                 ">>> Using system located in ROOT tree %s\n" % \
11768                                 settings["ROOT"]))
11769
11770                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11771                         not ("--quiet" in myopts):
11772                         writemsg_level(darkgreen(newline+\
11773                                 ">>> These are the packages that would be unmerged:\n"))
11774
11775                 # Preservation of order is required for --depclean and --prune so
11776                 # that dependencies are respected. Use all_selected to eliminate
11777                 # duplicate packages since the same package may be selected by
11778                 # multiple atoms.
11779                 pkgmap = []
11780                 all_selected = set()
11781                 for x in candidate_catpkgs:
11782                         # cycle through all our candidate deps and determine
11783                         # what will and will not get unmerged
11784                         try:
11785                                 mymatch = vartree.dbapi.match(x)
11786                         except portage.exception.AmbiguousPackageName, errpkgs:
11787                                 print "\n\n!!! The short ebuild name \"" + \
11788                                         x + "\" is ambiguous.  Please specify"
11789                                 print "!!! one of the following fully-qualified " + \
11790                                         "ebuild names instead:\n"
11791                                 for i in errpkgs[0]:
11792                                         print "    " + green(i)
11793                                 print
11794                                 sys.exit(1)
11795         
11796                         if not mymatch and x[0] not in "<>=~":
11797                                 mymatch = localtree.dep_match(x)
11798                         if not mymatch:
11799                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11800                                         (x, unmerge_action), noiselevel=-1)
11801                                 continue
11802
11803                         pkgmap.append(
11804                                 {"protected": set(), "selected": set(), "omitted": set()})
11805                         mykey = len(pkgmap) - 1
11806                         if unmerge_action=="unmerge":
11807                                 for y in mymatch:
11808                                         if y not in all_selected:
11809                                                 pkgmap[mykey]["selected"].add(y)
11810                                                 all_selected.add(y)
11811                         elif unmerge_action == "prune":
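                                      # For prune, protect only the single best installed version
                                      # (preferring the higher version, or the more recently
                                      # installed one within the same slot) and select every other
                                      # matched version for removal.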
11812                                 if len(mymatch) == 1:
11813                                         continue
11814                                 best_version = mymatch[0]
11815                                 best_slot = vartree.getslot(best_version)
11816                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11817                                 for mypkg in mymatch[1:]:
11818                                         myslot = vartree.getslot(mypkg)
11819                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11820                                         if (myslot == best_slot and mycounter > best_counter) or \
11821                                                 mypkg == portage.best([mypkg, best_version]):
11822                                                 if myslot == best_slot:
11823                                                         if mycounter < best_counter:
11824                                                                 # On slot collision, keep the one with the
11825                                                                 # highest counter since it is the most
11826                                                                 # recently installed.
11827                                                                 continue
11828                                                 best_version = mypkg
11829                                                 best_slot = myslot
11830                                                 best_counter = mycounter
11831                                 pkgmap[mykey]["protected"].add(best_version)
11832                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11833                                         if mypkg != best_version and mypkg not in all_selected)
11834                                 all_selected.update(pkgmap[mykey]["selected"])
11835                         else:
11836                                 # unmerge_action == "clean"
11837                                 slotmap={}
11838                                 for mypkg in mymatch:
11839                                         if unmerge_action == "clean":
11840                                                 myslot = localtree.getslot(mypkg)
11841                                         else:
11842                                                 # since we're pruning, we don't care about slots
11843                                                 # and put all the pkgs in together
11844                                                 myslot = 0
11845                                         if myslot not in slotmap:
11846                                                 slotmap[myslot] = {}
11847                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11848
11849                                 for mypkg in vartree.dbapi.cp_list(
11850                                         portage.dep_getkey(mymatch[0])):
11851                                         myslot = vartree.getslot(mypkg)
11852                                         if myslot not in slotmap:
11853                                                 slotmap[myslot] = {}
11854                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11855
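                                      # Within each slot, protect the most recently installed
                                      # package (highest counter) along with any versions not
                                      # matched by the atom; the remaining, older matches are
                                      # selected for unmerge.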
11856                                 for myslot in slotmap:
11857                                         counterkeys = slotmap[myslot].keys()
11858                                         if not counterkeys:
11859                                                 continue
11860                                         counterkeys.sort()
11861                                         pkgmap[mykey]["protected"].add(
11862                                                 slotmap[myslot][counterkeys[-1]])
11863                                         del counterkeys[-1]
11864
11865                                         for counter in counterkeys[:]:
11866                                                 mypkg = slotmap[myslot][counter]
11867                                                 if mypkg not in mymatch:
11868                                                         counterkeys.remove(counter)
11869                                                         pkgmap[mykey]["protected"].add(
11870                                                                 slotmap[myslot][counter])
11871
11872                                         #be pretty and get them in order of merge:
11873                                         for ckey in counterkeys:
11874                                                 mypkg = slotmap[myslot][ckey]
11875                                                 if mypkg not in all_selected:
11876                                                         pkgmap[mykey]["selected"].add(mypkg)
11877                                                         all_selected.add(mypkg)
11878                                         # ok, now the last-merged package
11879                                         # is protected, and the rest are selected
11880                 numselected = len(all_selected)
11881                 if global_unmerge and not numselected:
11882                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11883                         return 0
11884         
11885                 if not numselected:
11886                         portage.writemsg_stdout(
11887                                 "\n>>> No packages selected for removal by " + \
11888                                 unmerge_action + "\n")
11889                         return 0
11890         finally:
11891                 if vdb_lock:
11892                         vartree.dbapi.flush_cache()
11893                         portage.locks.unlockdir(vdb_lock)
11894         
11895         from portage.sets.base import EditablePackageSet
11896         
11897         # generate a list of package sets that are directly or indirectly listed in "world",
11898         # as there is no persistent list of "installed" sets
11899         installed_sets = ["world"]
11900         stop = False
11901         pos = 0
11902         while not stop:
11903                 stop = True
11904                 pos = len(installed_sets)
11905                 for s in installed_sets[pos - 1:]:
11906                         if s not in sets:
11907                                 continue
11908                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11909                         if candidates:
11910                                 stop = False
11911                                 installed_sets += candidates
11912         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11913         del stop, pos
11914
11915         # we don't want to unmerge packages that are still listed in user-editable package sets
11916         # listed in "world" as they would be remerged on the next update of "world" or the 
11917         # relevant package sets.
11918         unknown_sets = set()
11919         for cp in xrange(len(pkgmap)):
11920                 for cpv in pkgmap[cp]["selected"].copy():
11921                         try:
11922                                 pkg = _pkg(cpv)
11923                         except KeyError:
11924                                 # It could have been uninstalled
11925                                 # by a concurrent process.
11926                                 continue
11927
11928                         if unmerge_action != "clean" and \
11929                                 root_config.root == "/" and \
11930                                 portage.match_from_list(
11931                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11932                                 msg = ("Not unmerging package %s since there is no valid " + \
11933                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11934                                 for line in textwrap.wrap(msg, 75):
11935                                         out.eerror(line)
11936                                 # adjust pkgmap so the display output is correct
11937                                 pkgmap[cp]["selected"].remove(cpv)
11938                                 all_selected.remove(cpv)
11939                                 pkgmap[cp]["protected"].add(cpv)
11940                                 continue
11941
11942                         parents = []
11943                         for s in installed_sets:
11944                                 # skip sets that the user requested to unmerge, and skip world 
11945                                 # unless we're unmerging a package set (as the package would be 
11946                                 # removed from "world" later on)
11947                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11948                                         continue
11949
11950                                 if s not in sets:
11951                                         if s in unknown_sets:
11952                                                 continue
11953                                         unknown_sets.add(s)
11954                                         out = portage.output.EOutput()
11955                                         out.eerror(("Unknown set '@%s' in " + \
11956                                                 "%svar/lib/portage/world_sets") % \
11957                                                 (s, root_config.root))
11958                                         continue
11959
11960                                 # only check instances of EditablePackageSet as other classes are generally used for
11961                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11962                                 # user can't do much about them anyway)
11963                                 if isinstance(sets[s], EditablePackageSet):
11964
11965                                         # This is derived from a snippet of code in the
11966                                         # depgraph._iter_atoms_for_pkg() method.
11967                                         for atom in sets[s].iterAtomsForPackage(pkg):
11968                                                 inst_matches = vartree.dbapi.match(atom)
11969                                                 inst_matches.reverse() # descending order
11970                                                 higher_slot = None
11971                                                 for inst_cpv in inst_matches:
11972                                                         try:
11973                                                                 inst_pkg = _pkg(inst_cpv)
11974                                                         except KeyError:
11975                                                                 # It could have been uninstalled
11976                                                                 # by a concurrent process.
11977                                                                 continue
11978
11979                                                         if inst_pkg.cp != atom.cp:
11980                                                                 continue
11981                                                         if pkg >= inst_pkg:
11982                                                                 # This is descending order, and we're not
11983                                                                 # interested in any versions <= pkg given.
11984                                                                 break
11985                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11986                                                                 higher_slot = inst_pkg
11987                                                                 break
11988                                                 if higher_slot is None:
11989                                                         parents.append(s)
11990                                                         break
11991                         if parents:
11992                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11993                                 #print colorize("WARN", "but still listed in the following package sets:")
11994                                 #print "    %s\n" % ", ".join(parents)
11995                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11996                                 print colorize("WARN", "still referenced by the following package sets:")
11997                                 print "    %s\n" % ", ".join(parents)
11998                                 # adjust pkgmap so the display output is correct
11999                                 pkgmap[cp]["selected"].remove(cpv)
12000                                 all_selected.remove(cpv)
12001                                 pkgmap[cp]["protected"].add(cpv)
12002         
12003         del installed_sets
12004
12005         numselected = len(all_selected)
12006         if not numselected:
12007                 writemsg_level(
12008                         "\n>>> No packages selected for removal by " + \
12009                         unmerge_action + "\n")
12010                 return 0
12011
12012         # Unmerge order only matters in some cases
12013         if not ordered:
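                      # When ordering doesn't matter, collapse the per-atom entries into a
                      # single entry per ${CATEGORY}/${PN} so the preview below prints one
                      # block for each package.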
12014                 unordered = {}
12015                 for d in pkgmap:
12016                         selected = d["selected"]
12017                         if not selected:
12018                                 continue
12019                         cp = portage.cpv_getkey(iter(selected).next())
12020                         cp_dict = unordered.get(cp)
12021                         if cp_dict is None:
12022                                 cp_dict = {}
12023                                 unordered[cp] = cp_dict
12024                                 for k in d:
12025                                         cp_dict[k] = set()
12026                         for k, v in d.iteritems():
12027                                 cp_dict[k].update(v)
12028                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12029
12030         for x in xrange(len(pkgmap)):
12031                 selected = pkgmap[x]["selected"]
12032                 if not selected:
12033                         continue
12034                 for mytype, mylist in pkgmap[x].iteritems():
12035                         if mytype == "selected":
12036                                 continue
12037                         mylist.difference_update(all_selected)
12038                 cp = portage.cpv_getkey(iter(selected).next())
12039                 for y in localtree.dep_match(cp):
12040                         if y not in pkgmap[x]["omitted"] and \
12041                                 y not in pkgmap[x]["selected"] and \
12042                                 y not in pkgmap[x]["protected"] and \
12043                                 y not in all_selected:
12044                                 pkgmap[x]["omitted"].add(y)
12045                 if global_unmerge and not pkgmap[x]["selected"]:
12046                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12047                         continue
12048                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12049                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12050                                 "'%s' is part of your system profile.\n" % cp),
12051                                 level=logging.WARNING, noiselevel=-1)
12052                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12053                                 "be damaging to your system.\n\n"),
12054                                 level=logging.WARNING, noiselevel=-1)
12055                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12056                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12057                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12058                 if not quiet:
12059                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12060                 else:
12061                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12062                 for mytype in ["selected","protected","omitted"]:
12063                         if not quiet:
12064                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12065                         if pkgmap[x][mytype]:
12066                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12067                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12068                                 for pn, ver, rev in sorted_pkgs:
12069                                         if rev == "r0":
12070                                                 myversion = ver
12071                                         else:
12072                                                 myversion = ver + "-" + rev
12073                                         if mytype == "selected":
12074                                                 writemsg_level(
12075                                                         colorize("UNMERGE_WARN", myversion + " "),
12076                                                         noiselevel=-1)
12077                                         else:
12078                                                 writemsg_level(
12079                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12080                         else:
12081                                 writemsg_level("none ", noiselevel=-1)
12082                         if not quiet:
12083                                 writemsg_level("\n", noiselevel=-1)
12084                 if quiet:
12085                         writemsg_level("\n", noiselevel=-1)
12086
12087         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12088                 " packages are slated for removal.\n")
12089         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12090                         " and " + colorize("GOOD", "'omitted'") + \
12091                         " packages will not be removed.\n\n")
12092
12093         if "--pretend" in myopts:
12094                 #we're done... return
12095                 return 0
12096         if "--ask" in myopts:
12097                 if userquery("Would you like to unmerge these packages?")=="No":
12098                         # enter pretend mode for correct formatting of results
12099                         myopts["--pretend"] = True
12100                         print
12101                         print "Quitting."
12102                         print
12103                         return 0
12104         #the real unmerging begins, after a short delay....
12105         if clean_delay and not autoclean:
12106                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12107
12108         for x in xrange(len(pkgmap)):
12109                 for y in pkgmap[x]["selected"]:
12110                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12111                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12112                         mysplit = y.split("/")
12113                         #unmerge...
12114                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12115                                 mysettings, unmerge_action not in ["clean","prune"],
12116                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12117                                 scheduler=scheduler)
12118
12119                         if retval != os.EX_OK:
12120                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12121                                 if raise_on_error:
12122                                         raise UninstallFailure(retval)
12123                                 sys.exit(retval)
12124                         else:
12125                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12126                                         sets["world"].cleanPackage(vartree.dbapi, y)
12127                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12128         if clean_world and hasattr(sets["world"], "remove"):
12129                 for s in root_config.setconfig.active:
12130                         sets["world"].remove(SETPREFIX+s)
12131         return 1
12132
12133 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12134
12135         if os.path.exists("/usr/bin/install-info"):
12136                 out = portage.output.EOutput()
12137                 regen_infodirs=[]
12138                 for z in infodirs:
12139                         if z=='':
12140                                 continue
12141                         inforoot=normpath(root+z)
12142                         if os.path.isdir(inforoot):
12143                                 infomtime = long(os.stat(inforoot).st_mtime)
12144                                 if inforoot not in prev_mtimes or \
12145                                         prev_mtimes[inforoot] != infomtime:
12146                                                 regen_infodirs.append(inforoot)
12147
12148                 if not regen_infodirs:
12149                         portage.writemsg_stdout("\n")
12150                         out.einfo("GNU info directory index is up-to-date.")
12151                 else:
12152                         portage.writemsg_stdout("\n")
12153                         out.einfo("Regenerating GNU info directory index...")
12154
12155                         dir_extensions = ("", ".gz", ".bz2")
12156                         icount=0
12157                         badcount=0
12158                         errmsg = ""
12159                         for inforoot in regen_infodirs:
12160                                 if inforoot=='':
12161                                         continue
12162
12163                                 if not os.path.isdir(inforoot) or \
12164                                         not os.access(inforoot, os.W_OK):
12165                                         continue
12166
12167                                 file_list = os.listdir(inforoot)
12168                                 file_list.sort()
12169                                 dir_file = os.path.join(inforoot, "dir")
12170                                 moved_old_dir = False
12171                                 processed_count = 0
12172                                 for x in file_list:
12173                                         if x.startswith(".") or \
12174                                                 os.path.isdir(os.path.join(inforoot, x)):
12175                                                 continue
12176                                         if x.startswith("dir"):
12177                                                 skip = False
12178                                                 for ext in dir_extensions:
12179                                                         if x == "dir" + ext or \
12180                                                                 x == "dir" + ext + ".old":
12181                                                                 skip = True
12182                                                                 break
12183                                                 if skip:
12184                                                         continue
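                                              # Before processing the first info file, move any
                                              # existing dir index (and its compressed variants)
                                              # aside so install-info regenerates it from scratch.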
12185                                         if processed_count == 0:
12186                                                 for ext in dir_extensions:
12187                                                         try:
12188                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12189                                                                 moved_old_dir = True
12190                                                         except EnvironmentError, e:
12191                                                                 if e.errno != errno.ENOENT:
12192                                                                         raise
12193                                                                 del e
12194                                         processed_count += 1
12195                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12196                                         existsstr="already exists, for file `"
12197                                         if myso!="":
12198                                                 if re.search(existsstr,myso):
12199                                                         # Already exists... Don't increment the count for this.
12200                                                         pass
12201                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12202                                                         # This info file doesn't contain a DIR-header: install-info produces this
12203                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12204                                                         # Don't increment the count for this.
12205                                                         pass
12206                                                 else:
12207                                                         badcount=badcount+1
12208                                                         errmsg += myso + "\n"
12209                                         icount=icount+1
12210
12211                                 if moved_old_dir and not os.path.exists(dir_file):
12212                                         # We didn't generate a new dir file, so put the old file
12213                                         # back where it was originally found.
12214                                         for ext in dir_extensions:
12215                                                 try:
12216                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12217                                                 except EnvironmentError, e:
12218                                                         if e.errno != errno.ENOENT:
12219                                                                 raise
12220                                                         del e
12221
12222                                 # Clean up dir.old cruft so that it doesn't prevent
12223                                 # unmerge of otherwise empty directories.
12224                                 for ext in dir_extensions:
12225                                         try:
12226                                                 os.unlink(dir_file + ext + ".old")
12227                                         except EnvironmentError, e:
12228                                                 if e.errno != errno.ENOENT:
12229                                                         raise
12230                                                 del e
12231
12232                                 #update mtime so we can potentially avoid regenerating.
12233                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12234
12235                         if badcount:
12236                                 out.eerror("Processed %d info files; %d errors." % \
12237                                         (icount, badcount))
12238                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12239                         else:
12240                                 if icount > 0:
12241                                         out.einfo("Processed %d info files." % (icount,))
12242
12243
12244 def display_news_notification(root_config, myopts):
12245         target_root = root_config.root
12246         trees = root_config.trees
12247         settings = trees["vartree"].settings
12248         portdb = trees["porttree"].dbapi
12249         vardb = trees["vartree"].dbapi
12250         NEWS_PATH = os.path.join("metadata", "news")
12251         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12252         newsReaderDisplay = False
12253         update = "--pretend" not in myopts
12254
12255         for repo in portdb.getRepositories():
12256                 unreadItems = checkUpdatedNewsItems(
12257                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12258                 if unreadItems:
12259                         if not newsReaderDisplay:
12260                                 newsReaderDisplay = True
12261                                 print
12262                         print colorize("WARN", " * IMPORTANT:"),
12263                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12264                         
12265         
12266         if newsReaderDisplay:
12267                 print colorize("WARN", " *"),
12268                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12269                 print
12270
12271 def display_preserved_libs(vardbapi):
12272         MAX_DISPLAY = 3
12273
12274         # Ensure the registry is consistent with existing files.
12275         vardbapi.plib_registry.pruneNonExisting()
12276
12277         if vardbapi.plib_registry.hasEntries():
12278                 print
12279                 print colorize("WARN", "!!!") + " existing preserved libs:"
12280                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12281                 linkmap = vardbapi.linkmap
12282                 consumer_map = {}
12283                 owners = {}
12284                 linkmap_broken = False
12285
12286                 try:
12287                         linkmap.rebuild()
12288                 except portage.exception.CommandNotFound, e:
12289                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12290                                 level=logging.ERROR, noiselevel=-1)
12291                         del e
12292                         linkmap_broken = True
12293                 else:
12294                         search_for_owners = set()
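                              # Collect a bounded number of consumer paths per preserved lib so
                              # their owning packages can be resolved in one getFileOwnerMap() call.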
12295                         for cpv in plibdata:
12296                                 internal_plib_keys = set(linkmap._obj_key(f) \
12297                                         for f in plibdata[cpv])
12298                                 for f in plibdata[cpv]:
12299                                         if f in consumer_map:
12300                                                 continue
12301                                         consumers = []
12302                                         for c in linkmap.findConsumers(f):
12303                                                 # Filter out any consumers that are also preserved libs
12304                                                 # belonging to the same package as the provider.
12305                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12306                                                         consumers.append(c)
12307                                         consumers.sort()
12308                                         consumer_map[f] = consumers
12309                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12310
12311                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12312
12313                 for cpv in plibdata:
12314                         print colorize("WARN", ">>>") + " package: %s" % cpv
12315                         samefile_map = {}
12316                         for f in plibdata[cpv]:
12317                                 obj_key = linkmap._obj_key(f)
12318                                 alt_paths = samefile_map.get(obj_key)
12319                                 if alt_paths is None:
12320                                         alt_paths = set()
12321                                         samefile_map[obj_key] = alt_paths
12322                                 alt_paths.add(f)
12323
12324                         for alt_paths in samefile_map.itervalues():
12325                                 alt_paths = sorted(alt_paths)
12326                                 for p in alt_paths:
12327                                         print colorize("WARN", " * ") + " - %s" % (p,)
12328                                 f = alt_paths[0]
12329                                 consumers = consumer_map.get(f, [])
12330                                 for c in consumers[:MAX_DISPLAY]:
12331                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12332                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12333                                 if len(consumers) == MAX_DISPLAY + 1:
12334                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12335                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12336                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12337                                 elif len(consumers) > MAX_DISPLAY:
12338                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12339                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12340
12341
12342 def _flush_elog_mod_echo():
12343         """
12344         Dump the mod_echo output now so that our other
12345         notifications are shown last.
12346         @rtype: bool
12347         @returns: True if messages were shown, False otherwise.
12348         """
12349         messages_shown = False
12350         try:
12351                 from portage.elog import mod_echo
12352         except ImportError:
12353                 pass # happens during downgrade to a version without the module
12354         else:
12355                 messages_shown = bool(mod_echo._items)
12356                 mod_echo.finalize()
12357         return messages_shown
12358
12359 def post_emerge(root_config, myopts, mtimedb, retval):
12360         """
12361         Misc. things to run at the end of a merge session.
12362         
12363         Update Info Files
12364         Update Config Files
12365         Update News Items
12366         Commit mtimeDB
12367         Display preserved libs warnings
12368         Exit Emerge
12369
12370         @param root_config: The RootConfig which provides the target ROOT and its package databases
12371         @type root_config: RootConfig
12372         @param mtimedb: The mtimeDB to store data needed across merge invocations
12373         @type mtimedb: MtimeDB class instance
12374         @param retval: Emerge's return value
12375         @type retval: Int
12376         @rtype: None
12377         @returns:
12378         1.  Calls sys.exit(retval)
12379         """
12380
12381         target_root = root_config.root
12382         trees = { target_root : root_config.trees }
12383         vardbapi = trees[target_root]["vartree"].dbapi
12384         settings = vardbapi.settings
12385         info_mtimes = mtimedb["info"]
12386
12387         # Load the most current variables from ${ROOT}/etc/profile.env
12388         settings.unlock()
12389         settings.reload()
12390         settings.regenerate()
12391         settings.lock()
12392
12393         config_protect = settings.get("CONFIG_PROTECT","").split()
12394         infodirs = settings.get("INFOPATH","").split(":") + \
12395                 settings.get("INFODIR","").split(":")
12396
12397         os.chdir("/")
12398
12399         if retval == os.EX_OK:
12400                 exit_msg = " *** exiting successfully."
12401         else:
12402                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12403         emergelog("notitles" not in settings.features, exit_msg)
12404
12405         _flush_elog_mod_echo()
12406
12407         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
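              # PORTAGE_COUNTER_HASH holds a hash of the vdb counters recorded earlier; if it
              # still matches the current hash, no packages have been merged or unmerged since.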
12408         if "--pretend" in myopts or (counter_hash is not None and \
12409                 counter_hash == vardbapi._counter_hash()):
12410                 display_news_notification(root_config, myopts)
12411                 # If vdb state has not changed then there's nothing else to do.
12412                 sys.exit(retval)
12413
12414         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12415         portage.util.ensure_dirs(vdb_path)
12416         vdb_lock = None
12417         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12418                 vdb_lock = portage.locks.lockdir(vdb_path)
12419
12420         if vdb_lock:
12421                 try:
12422                         if "noinfo" not in settings.features:
12423                                 chk_updated_info_files(target_root,
12424                                         infodirs, info_mtimes, retval)
12425                         mtimedb.commit()
12426                 finally:
12427                         if vdb_lock:
12428                                 portage.locks.unlockdir(vdb_lock)
12429
12430         chk_updated_cfg_files(target_root, config_protect)
12431         
12432         display_news_notification(root_config, myopts)
12433         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12434                 display_preserved_libs(vardbapi)        
12435
12436         sys.exit(retval)
12437
12438
12439 def chk_updated_cfg_files(target_root, config_protect):
12440         if config_protect:
12441                 #number of directories with some protect files in them
12442                 procount=0
12443                 for x in config_protect:
12444                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12445                         if not os.access(x, os.W_OK):
12446                                 # Avoid Permission denied errors generated
12447                                 # later by `find`.
12448                                 continue
12449                         try:
12450                                 mymode = os.lstat(x).st_mode
12451                         except OSError:
12452                                 continue
12453                         if stat.S_ISLNK(mymode):
12454                                 # We want to treat it like a directory if it
12455                                 # is a symlink to an existing directory.
12456                                 try:
12457                                         real_mode = os.stat(x).st_mode
12458                                         if stat.S_ISDIR(real_mode):
12459                                                 mymode = real_mode
12460                                 except OSError:
12461                                         pass
12462                         if stat.S_ISDIR(mymode):
12463                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12464                         else:
12465                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12466                                         os.path.split(x.rstrip(os.path.sep))
12467                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12468                         a = commands.getstatusoutput(mycommand)
12469                         if a[0] != 0:
12470                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12471                                 sys.stderr.flush()
12472                                 # Show the error message alone, sending stdout to /dev/null.
12473                                 os.system(mycommand + " 1>/dev/null")
12474                         else:
12475                                 files = a[1].split('\0')
12476                                 # split always produces an empty string as the last element
12477                                 if files and not files[-1]:
12478                                         del files[-1]
12479                                 if files:
12480                                         procount += 1
12481                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12482                                         if stat.S_ISDIR(mymode):
12483                                                  print "%d config files in '%s' need updating." % \
12484                                                         (len(files), x)
12485                                         else:
12486                                                  print "config file '%s' needs updating." % x
12487
12488                 if procount:
12489                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12490                                 " section of the " + bold("emerge")
12491                         print " "+yellow("*")+" man page to learn how to update config files."
12492
12493 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12494         update=False):
12495         """
12496         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12497         Returns the number of unread (yet relevant) items.
12498         
12499         @param portdb: a portage tree database
12500         @type portdb: pordbapi
12501         @param vardb: an installed package database
12502         @type vardb: vardbapi
12503         @param NEWS_PATH: path to news items, relative to the repository root
12504         @type NEWS_PATH: str
12505         @param UNREAD_PATH: path to the directory used to track unread news items
12506         @type UNREAD_PATH: str
12507         @param repo_id: the repository whose news items should be checked
12508         @type repo_id: str
12509         @rtype: Integer
12510         @returns:
12511         1.  The number of unread but relevant news items.
12512         
12513         """
12514         from portage.news import NewsManager
12515         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12516         return manager.getUnreadItems( repo_id, update=update )
12517
12518 def insert_category_into_atom(atom, category):
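              # Inject a category in front of the package name portion of an atom,
              # e.g. (">=foo-1.0", "sys-apps") -> ">=sys-apps/foo-1.0".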
12519         alphanum = re.search(r'\w', atom)
12520         if alphanum:
12521                 ret = atom[:alphanum.start()] + "%s/" % category + \
12522                         atom[alphanum.start():]
12523         else:
12524                 ret = None
12525         return ret
12526
12527 def is_valid_package_atom(x):
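              # Atoms given without a category get a dummy "cat/" prefix so that
              # portage.isvalidatom() can still validate the rest of the atom.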
12528         if "/" not in x:
12529                 alphanum = re.search(r'\w', x)
12530                 if alphanum:
12531                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12532         return portage.isvalidatom(x)
12533
12534 def show_blocker_docs_link():
12535         print
12536         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12537         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12538         print
12539         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12540         print
12541
12542 def show_mask_docs():
12543         print "For more information, see the MASKED PACKAGES section in the emerge"
12544         print "man page or refer to the Gentoo Handbook."
12545
12546 def action_sync(settings, trees, mtimedb, myopts, myaction):
12547         xterm_titles = "notitles" not in settings.features
12548         emergelog(xterm_titles, " === sync")
12549         myportdir = settings.get("PORTDIR", None)
12550         out = portage.output.EOutput()
12551         if not myportdir:
12552                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12553                 sys.exit(1)
12554         if myportdir[-1]=="/":
12555                 myportdir=myportdir[:-1]
12556         try:
12557                 st = os.stat(myportdir)
12558         except OSError:
12559                 st = None
12560         if st is None:
12561                 print ">>>",myportdir,"not found, creating it."
12562                 os.makedirs(myportdir,0755)
12563                 st = os.stat(myportdir)
12564
12565         spawn_kwargs = {}
12566         spawn_kwargs["env"] = settings.environ()
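              # With FEATURES=usersync, sync as the owner of PORTDIR when it is owned by a
              # different user or group that has the corresponding access bits set.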
12567         if 'usersync' in settings.features and \
12568                 portage.data.secpass >= 2 and \
12569                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12570                 st.st_gid != os.getgid() and st.st_mode & 0070):
12571                 try:
12572                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12573                 except KeyError:
12574                         pass
12575                 else:
12576                         # Drop privileges when syncing, in order to match
12577                         # existing uid/gid settings.
12578                         spawn_kwargs["uid"]    = st.st_uid
12579                         spawn_kwargs["gid"]    = st.st_gid
12580                         spawn_kwargs["groups"] = [st.st_gid]
12581                         spawn_kwargs["env"]["HOME"] = homedir
12582                         umask = 0002
12583                         if not st.st_mode & 0020:
12584                                 umask = umask | 0020
12585                         spawn_kwargs["umask"] = umask
12586
12587         syncuri = settings.get("SYNC", "").strip()
12588         if not syncuri:
12589                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12590                         noiselevel=-1, level=logging.ERROR)
12591                 return 1
12592
12593         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12594         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12595
12596         os.umask(0022)
12597         dosyncuri = syncuri
12598         updatecache_flg = False
12599         if myaction == "metadata":
12600                 print "skipping sync"
12601                 updatecache_flg = True
12602         elif ".git" in vcs_dirs:
12603                 # Update existing git repository, and ignore the syncuri. We are
12604                 # going to trust the user and assume that the user is in the branch
12605                 # that he/she wants updated. We'll let the user manage branches with
12606                 # git directly.
12607                 if portage.process.find_binary("git") is None:
12608                         msg = ["Command not found: git",
12609                         "Type \"emerge dev-util/git\" to enable git support."]
12610                         for l in msg:
12611                                 writemsg_level("!!! %s\n" % l,
12612                                         level=logging.ERROR, noiselevel=-1)
12613                         return 1
12614                 msg = ">>> Starting git pull in %s..." % myportdir
12615                 emergelog(xterm_titles, msg )
12616                 writemsg_level(msg + "\n")
12617                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12618                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12619                 if exitcode != os.EX_OK:
12620                         msg = "!!! git pull error in %s." % myportdir
12621                         emergelog(xterm_titles, msg)
12622                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12623                         return exitcode
12624                 msg = ">>> Git pull in %s successful" % myportdir
12625                 emergelog(xterm_titles, msg)
12626                 writemsg_level(msg + "\n")
12627                 exitcode = git_sync_timestamps(settings, myportdir)
12628                 if exitcode == os.EX_OK:
12629                         updatecache_flg = True
12630         elif syncuri[:8]=="rsync://":
12631                 for vcs_dir in vcs_dirs:
12632                         writemsg_level(("!!! %s appears to be under revision " + \
12633                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12634                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12635                         return 1
12636                 if not os.path.exists("/usr/bin/rsync"):
12637                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12638                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12639                         sys.exit(1)
12640                 mytimeout=180
12641
12642                 rsync_opts = []
12643                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12644                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12645                         rsync_opts.extend([
12646                                 "--recursive",    # Recurse directories
12647                                 "--links",        # Consider symlinks
12648                                 "--safe-links",   # Ignore links outside of tree
12649                                 "--perms",        # Preserve permissions
12650                                 "--times",        # Preserve mod times
12651                                 "--compress",     # Compress the data transmitted
12652                                 "--force",        # Force deletion on non-empty dirs
12653                                 "--whole-file",   # Don't do block transfers, only entire files
12654                                 "--delete",       # Delete files that aren't in the master tree
12655                                 "--stats",        # Show final statistics about what was transferred
12656                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12657                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12658                                 "--exclude=/local",       # Exclude local     from consideration
12659                                 "--exclude=/packages",    # Exclude packages  from consideration
12660                         ])
12661
12662                 else:
12663                         # The below validation is not needed when using the above hardcoded
12664                         # defaults.
12665
12666                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12667                         rsync_opts.extend(
12668                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12669                         for opt in ("--recursive", "--times"):
12670                                 if opt not in rsync_opts:
12671                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12672                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12673                                         rsync_opts.append(opt)
12674         
12675                         for exclude in ("distfiles", "local", "packages"):
12676                                 opt = "--exclude=/%s" % exclude
12677                                 if opt not in rsync_opts:
12678                                         portage.writemsg(yellow("WARNING:") + \
12679                                         " adding required option %s not included in "  % opt + \
12680                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12681                                         rsync_opts.append(opt)
12682         
12683                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12684                                 def rsync_opt_startswith(opt_prefix):
12685                                         for x in rsync_opts:
12686                                                 if x.startswith(opt_prefix):
12687                                                         return True
12688                                         return False
12689
12690                                 if not rsync_opt_startswith("--timeout="):
12691                                         rsync_opts.append("--timeout=%d" % mytimeout)
12692
12693                                 for opt in ("--compress", "--whole-file"):
12694                                         if opt not in rsync_opts:
12695                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12696                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12697                                                 rsync_opts.append(opt)
12698
12699                 if "--quiet" in myopts:
12700                         rsync_opts.append("--quiet")    # Shut up a lot
12701                 else:
12702                         rsync_opts.append("--verbose")  # Print filelist
12703
12704                 if "--verbose" in myopts:
12705                         rsync_opts.append("--progress")  # Progress meter for each file
12706
12707                 if "--debug" in myopts:
12708                         rsync_opts.append("--checksum") # Force checksum on all files
12709
12710                 # Real local timestamp file.
12711                 servertimestampfile = os.path.join(
12712                         myportdir, "metadata", "timestamp.chk")
12713
12714                 content = portage.util.grabfile(servertimestampfile)
12715                 mytimestamp = 0
12716                 if content:
12717                         try:
12718                                 mytimestamp = time.mktime(time.strptime(content[0],
12719                                         "%a, %d %b %Y %H:%M:%S +0000"))
12720                         except (OverflowError, ValueError):
12721                                 pass
12722                 del content
12723
12724                 try:
12725                         rsync_initial_timeout = \
12726                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12727                 except ValueError:
12728                         rsync_initial_timeout = 15
12729
12730                 try:
12731                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12732                 except SystemExit, e:
12733                         raise # Needed else can't exit
12734                 except:
12735                         maxretries=3 #default number of retries
12736
12737                 retries=0
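                      # Split the rsync URI into optional user, hostname and optional port so
                      # that resolved mirror IPs can be substituted for the hostname below.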
12738                 user_name, hostname, port = re.split(
12739                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12740                 if port is None:
12741                         port=""
12742                 if user_name is None:
12743                         user_name=""
12744                 updatecache_flg=True
12745                 all_rsync_opts = set(rsync_opts)
12746                 extra_rsync_opts = shlex.split(
12747                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12748                 all_rsync_opts.update(extra_rsync_opts)
12749                 family = socket.AF_INET
12750                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12751                         family = socket.AF_INET
12752                 elif socket.has_ipv6 and \
12753                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12754                         family = socket.AF_INET6
12755                 ips=[]
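                      # Sentinel exit codes used internally by the retry loop below.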
12756                 SERVER_OUT_OF_DATE = -1
12757                 EXCEEDED_MAX_RETRIES = -2
12758                 while (1):
12759                         if ips:
12760                                 del ips[0]
12761                         if ips==[]:
12762                                 try:
12763                                         for addrinfo in socket.getaddrinfo(
12764                                                 hostname, None, family, socket.SOCK_STREAM):
12765                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12766                                                         # IPv6 addresses need to be enclosed in square brackets
12767                                                         ips.append("[%s]" % addrinfo[4][0])
12768                                                 else:
12769                                                         ips.append(addrinfo[4][0])
12770                                         from random import shuffle
12771                                         shuffle(ips)
12772                                 except SystemExit, e:
12773                                         raise # Needed else can't exit
12774                                 except Exception, e:
12775                                         print "Notice:",str(e)
12776                                         dosyncuri=syncuri
12777
12778                         if ips:
12779                                 try:
12780                                         dosyncuri = syncuri.replace(
12781                                                 "//" + user_name + hostname + port + "/",
12782                                                 "//" + user_name + ips[0] + port + "/", 1)
12783                                 except SystemExit, e:
12784                                         raise # Needed else can't exit
12785                                 except Exception, e:
12786                                         print "Notice:",str(e)
12787                                         dosyncuri=syncuri
12788
12789                         if (retries==0):
12790                                 if "--ask" in myopts:
12791                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12792                                                 print
12793                                                 print "Quitting."
12794                                                 print
12795                                                 sys.exit(0)
12796                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12797                                 if "--quiet" not in myopts:
12798                                         print ">>> Starting rsync with "+dosyncuri+"..."
12799                         else:
12800                                 emergelog(xterm_titles,
12801                                         ">>> Starting retry %d of %d with %s" % \
12802                                                 (retries,maxretries,dosyncuri))
12803                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12804
12805                         if mytimestamp != 0 and "--quiet" not in myopts:
12806                                 print ">>> Checking server timestamp ..."
12807
12808                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12809
12810                         if "--debug" in myopts:
12811                                 print rsynccommand
12812
12813                         exitcode = os.EX_OK
12814                         servertimestamp = 0
12815                         # Even if there's no timestamp available locally, fetch the
12816                         # timestamp anyway as an initial probe to verify that the server is
12817                         # responsive.  This protects us from hanging indefinitely on a
12818                         # connection attempt to an unresponsive server which rsync's
12819                         # --timeout option does not prevent.
12820                         if True:
12821                                 # Temporary file for remote server timestamp comparison.
12822                                 from tempfile import mkstemp
12823                                 fd, tmpservertimestampfile = mkstemp()
12824                                 os.close(fd)
12825                                 mycommand = rsynccommand[:]
12826                                 mycommand.append(dosyncuri.rstrip("/") + \
12827                                         "/metadata/timestamp.chk")
12828                                 mycommand.append(tmpservertimestampfile)
12829                                 content = None
12830                                 mypids = []
12831                                 try:
12832                                         def timeout_handler(signum, frame):
12833                                                 raise portage.exception.PortageException("timed out")
12834                                         signal.signal(signal.SIGALRM, timeout_handler)
12835                                         # Timeout here in case the server is unresponsive.  The
12836                                         # --timeout rsync option doesn't apply to the initial
12837                                         # connection attempt.
12838                                         if rsync_initial_timeout:
12839                                                 signal.alarm(rsync_initial_timeout)
12840                                         try:
12841                                                 mypids.extend(portage.process.spawn(
12842                                                         mycommand, env=settings.environ(), returnpid=True))
12843                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12844                                                 content = portage.grabfile(tmpservertimestampfile)
12845                                         finally:
12846                                                 if rsync_initial_timeout:
12847                                                         signal.alarm(0)
12848                                                 try:
12849                                                         os.unlink(tmpservertimestampfile)
12850                                                 except OSError:
12851                                                         pass
12852                                 except portage.exception.PortageException, e:
12853                                         # timed out
12854                                         print e
12855                                         del e
12856                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12857                                                 os.kill(mypids[0], signal.SIGTERM)
12858                                                 os.waitpid(mypids[0], 0)
12859                                         # This is the same code rsync uses for timeout.
12860                                         exitcode = 30
12861                                 else:
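                                              # Decode the os.waitpid() status: a nonzero low byte means the
                                              # child was killed by a signal, otherwise the real exit status
                                              # is stored in the high byte.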
12862                                         if exitcode != os.EX_OK:
12863                                                 if exitcode & 0xff:
12864                                                         exitcode = (exitcode & 0xff) << 8
12865                                                 else:
12866                                                         exitcode = exitcode >> 8
12867                                 if mypids:
12868                                         portage.process.spawned_pids.remove(mypids[0])
12869                                 if content:
12870                                         try:
12871                                                 servertimestamp = time.mktime(time.strptime(
12872                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12873                                         except (OverflowError, ValueError):
12874                                                 pass
12875                                 del mycommand, mypids, content
12876                         if exitcode == os.EX_OK:
12877                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12878                                         emergelog(xterm_titles,
12879                                                 ">>> Cancelling sync -- Already current.")
12880                                         print
12881                                         print ">>>"
12882                                         print ">>> Timestamps on the server and in the local repository are the same."
12883                                         print ">>> Cancelling all further sync action. You are already up to date."
12884                                         print ">>>"
12885                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12886                                         print ">>>"
12887                                         print
12888                                         sys.exit(0)
12889                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12890                                         emergelog(xterm_titles,
12891                                                 ">>> Server out of date: %s" % dosyncuri)
12892                                         print
12893                                         print ">>>"
12894                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12895                                         print ">>>"
12896                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12897                                         print ">>>"
12898                                         print
12899                                         exitcode = SERVER_OUT_OF_DATE
12900                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12901                                         # actual sync
12902                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12903                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12904                                         if exitcode in [0,1,3,4,11,14,20,21]:
12905                                                 break
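                                      # rsync exit codes for which another attempt is unlikely to help;
                                      # leave the retry loop and report the error in detail below.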
12906                         elif exitcode in [1,3,4,11,14,20,21]:
12907                                 break
12908                         else:
12909                                 # Code 2 indicates protocol incompatibility, which is expected
12910                                 # for servers with protocol < 29 that don't support
12911                                 # --prune-empty-directories.  Retry for a server that supports
12912                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12913                                 pass
12914
12915                         retries=retries+1
12916
12917                         if retries<=maxretries:
12918                                 print ">>> Retrying..."
12919                                 time.sleep(11)
12920                         else:
12921                                 # over retries
12922                                 # exit loop
12923                                 updatecache_flg=False
12924                                 exitcode = EXCEEDED_MAX_RETRIES
12925                                 break
12926
12927                 if (exitcode==0):
12928                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12929                 elif exitcode == SERVER_OUT_OF_DATE:
12930                         sys.exit(1)
12931                 elif exitcode == EXCEEDED_MAX_RETRIES:
12932                         sys.stderr.write(
12933                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12934                         sys.exit(1)
12935                 elif (exitcode>0):
12936                         msg = []
12937                         if exitcode==1:
12938                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12939                                 msg.append("that your SYNC statement is proper.")
12940                                 msg.append("SYNC=" + settings["SYNC"])
12941                         elif exitcode==11:
12942                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12943                                 msg.append("this means your disk is full, but it can also be caused by corruption")
12944                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12945                                 msg.append("and try again after the problem has been fixed.")
12946                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12947                         elif exitcode==20:
12948                                 msg.append("Rsync was killed before it finished.")
12949                         else:
12950                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12951                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12952                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12953                                 msg.append("temporary problem unless complications exist with your network")
12954                                 msg.append("(and possibly your system's filesystem) configuration.")
12955                         for line in msg:
12956                                 out.eerror(line)
12957                         sys.exit(exitcode)
12958         elif syncuri[:6]=="cvs://":
12959                 if not os.path.exists("/usr/bin/cvs"):
12960                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12961                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12962                         sys.exit(1)
12963                 cvsroot=syncuri[6:]
12964                 cvsdir=os.path.dirname(myportdir)
12965                 if not os.path.exists(myportdir+"/CVS"):
12966                         #initial checkout
12967                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12968                         if os.path.exists(cvsdir+"/gentoo-x86"):
12969                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12970                                 sys.exit(1)
12971                         try:
12972                                 os.rmdir(myportdir)
12973                         except OSError, e:
12974                                 if e.errno != errno.ENOENT:
12975                                         sys.stderr.write(
12976                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12977                                         sys.exit(1)
12978                                 del e
12979                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12980                                 print "!!! cvs checkout error; exiting."
12981                                 sys.exit(1)
12982                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12983                 else:
12984                         #cvs update
12985                         print ">>> Starting cvs update with "+syncuri+"..."
12986                         retval = portage.process.spawn_bash(
12987                                 "cd %s; cvs -z0 -q update -dP" % \
12988                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12989                         if retval != os.EX_OK:
12990                                 sys.exit(retval)
12991                 dosyncuri = syncuri
12992         else:
12993                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12994                         noiselevel=-1, level=logging.ERROR)
12995                 return 1
12996
12997         if updatecache_flg and  \
12998                 myaction != "metadata" and \
12999                 "metadata-transfer" not in settings.features:
13000                 updatecache_flg = False
13001
13002         # Reload the whole config from scratch.
13003         settings, trees, mtimedb = load_emerge_config(trees=trees)
13004         root_config = trees[settings["ROOT"]]["root_config"]
13005         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13006
13007         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13008                 action_metadata(settings, portdb, myopts)
13009
13010         if portage._global_updates(trees, mtimedb["updates"]):
13011                 mtimedb.commit()
13012                 # Reload the whole config from scratch.
13013                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13014                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13015                 root_config = trees[settings["ROOT"]]["root_config"]
13016
13017         mybestpv = portdb.xmatch("bestmatch-visible",
13018                 portage.const.PORTAGE_PACKAGE_ATOM)
13019         mypvs = portage.best(
13020                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13021                 portage.const.PORTAGE_PACKAGE_ATOM))
13022
13023         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13024
13025         if myaction != "metadata":
13026                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13027                         retval = portage.process.spawn(
13028                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13029                                 dosyncuri], env=settings.environ())
13030                         if retval != os.EX_OK:
13031                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13032
13033         if mybestpv != mypvs and "--quiet" not in myopts:
13034                 print
13035                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13036                 print red(" * ")+"that you update portage now, before any other packages are updated."
13037                 print
13038                 print red(" * ")+"To update portage, run 'emerge portage' now."
13039                 print
13040         
13041         display_news_notification(root_config, myopts)
13042         return os.EX_OK
13043
13044 def git_sync_timestamps(settings, portdir):
13045         """
13046         Since git doesn't preserve timestamps, synchronize timestamps between
13047         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13048         for a given file as long as the file in the working tree is not modified
13049         (relative to HEAD).
13050         """
13051         cache_dir = os.path.join(portdir, "metadata", "cache")
13052         if not os.path.isdir(cache_dir):
13053                 return os.EX_OK
13054         writemsg_level(">>> Synchronizing timestamps...\n")
13055
13056         from portage.cache.cache_errors import CacheError
13057         try:
13058                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13059                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13060         except CacheError, e:
13061                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13062                         level=logging.ERROR, noiselevel=-1)
13063                 return 1
13064
13065         ec_dir = os.path.join(portdir, "eclass")
13066         try:
13067                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13068                         if f.endswith(".eclass"))
13069         except OSError, e:
13070                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13071                         level=logging.ERROR, noiselevel=-1)
13072                 return 1
13073
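              # Ask git which files are modified relative to HEAD; cache timestamps for
              # these cannot be trusted, so the corresponding entries are skipped below.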
13074         args = [portage.const.BASH_BINARY, "-c",
13075                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13076                 portage._shell_quote(portdir)]
13077         import subprocess
13078         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13079         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13080         rval = proc.wait()
13081         if rval != os.EX_OK:
13082                 return rval
13083
13084         modified_eclasses = set(ec for ec in ec_names \
13085                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13086
13087         updated_ec_mtimes = {}
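              # Remember eclass mtimes that have already been synchronized so that each
              # eclass file is touched at most once.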
13088
13089         for cpv in cache_db:
13090                 cpv_split = portage.catpkgsplit(cpv)
13091                 if cpv_split is None:
13092                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13093                                 level=logging.ERROR, noiselevel=-1)
13094                         continue
13095
13096                 cat, pn, ver, rev = cpv_split
13097                 cat, pf = portage.catsplit(cpv)
13098                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13099                 if relative_eb_path in modified_files:
13100                         continue
13101
13102                 try:
13103                         cache_entry = cache_db[cpv]
13104                         eb_mtime = cache_entry.get("_mtime_")
13105                         ec_mtimes = cache_entry.get("_eclasses_")
13106                 except KeyError:
13107                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13108                                 level=logging.ERROR, noiselevel=-1)
13109                         continue
13110                 except CacheError, e:
13111                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13112                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13113                         continue
13114
13115                 if eb_mtime is None:
13116                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13117                                 level=logging.ERROR, noiselevel=-1)
13118                         continue
13119
13120                 try:
13121                         eb_mtime = long(eb_mtime)
13122                 except ValueError:
13123                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13124                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13125                         continue
13126
13127                 if ec_mtimes is None:
13128                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13129                                 level=logging.ERROR, noiselevel=-1)
13130                         continue
13131
13132                 if modified_eclasses.intersection(ec_mtimes):
13133                         continue
13134
13135                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13136                 if missing_eclasses:
13137                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13138                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13139                                 noiselevel=-1)
13140                         continue
13141
13142                 eb_path = os.path.join(portdir, relative_eb_path)
13143                 try:
13144                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13145                 except OSError:
13146                         writemsg_level("!!! Missing ebuild: %s\n" % \
13147                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13148                         continue
13149
13150                 inconsistent = False
13151                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13152                         updated_mtime = updated_ec_mtimes.get(ec)
13153                         if updated_mtime is not None and updated_mtime != ec_mtime:
13154                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13155                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13156                                 inconsistent = True
13157                                 break
13158
13159                 if inconsistent:
13160                         continue
13161
13162                 if current_eb_mtime != eb_mtime:
13163                         os.utime(eb_path, (eb_mtime, eb_mtime))
13164
13165                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13166                         if ec in updated_ec_mtimes:
13167                                 continue
13168                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13169                         current_mtime = long(os.stat(ec_path).st_mtime)
13170                         if current_mtime != ec_mtime:
13171                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13172                         updated_ec_mtimes[ec] = ec_mtime
13173
13174         return os.EX_OK
13175
13176 def action_metadata(settings, portdb, myopts):
13177         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13178         old_umask = os.umask(0002)
13179         cachedir = os.path.normpath(settings.depcachedir)
13180         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13181                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13182                                         "/sys", "/tmp", "/usr",  "/var"]:
13183                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13184                         "ROOT DIRECTORY ON YOUR SYSTEM."
13185                 print >> sys.stderr, \
13186                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13187                 sys.exit(73)
13188         if not os.path.exists(cachedir):
13189                 os.mkdir(cachedir)
13190
13191         ec = portage.eclass_cache.cache(portdb.porttree_root)
13192         myportdir = os.path.realpath(settings["PORTDIR"])
13193         cm = settings.load_best_module("portdbapi.metadbmodule")(
13194                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13195
13196         from portage.cache import util
13197
13198         class percentage_noise_maker(util.quiet_mirroring):
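                      # Wraps the quiet mirroring handler with a rough percentage progress
                      # indicator that is printed while the cache is being mirrored.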
13199                 def __init__(self, dbapi):
13200                         self.dbapi = dbapi
13201                         self.cp_all = dbapi.cp_all()
13202                         l = len(self.cp_all)
13203                         self.call_update_min = 100000000
13204                         self.min_cp_all = l/100.0
13205                         self.count = 1
13206                         self.pstr = ''
13207
13208                 def __iter__(self):
13209                         for x in self.cp_all:
13210                                 self.count += 1
13211                                 if self.count > self.min_cp_all:
13212                                         self.call_update_min = 0
13213                                         self.count = 0
13214                                 for y in self.dbapi.cp_list(x):
13215                                         yield y
13216                         self.call_update_min = 0
13217
13218                 def update(self, *arg):
13219                         try: self.pstr = int(self.pstr) + 1
13220                         except ValueError: self.pstr = 1
13221                         sys.stdout.write("%s%i%%" % \
13222                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13223                         sys.stdout.flush()
13224                         self.call_update_min = 10000000
13225
13226                 def finish(self, *arg):
13227                         sys.stdout.write("\b\b\b\b100%\n")
13228                         sys.stdout.flush()
13229
13230         if "--quiet" in myopts:
13231                 def quicky_cpv_generator(cp_all_list):
13232                         for x in cp_all_list:
13233                                 for y in portdb.cp_list(x):
13234                                         yield y
13235                 source = quicky_cpv_generator(portdb.cp_all())
13236                 noise_maker = portage.cache.util.quiet_mirroring()
13237         else:
13238                 noise_maker = source = percentage_noise_maker(portdb)
13239         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13240                 eclass_cache=ec, verbose_instance=noise_maker)
13241
13242         sys.stdout.flush()
13243         os.umask(old_umask)
13244
13245 def action_regen(settings, portdb, max_jobs, max_load):
13246         xterm_titles = "notitles" not in settings.features
13247         emergelog(xterm_titles, " === regen")
13248         #regenerate cache entries
13249         portage.writemsg_stdout("Regenerating cache entries...\n")
13250         try:
13251                 os.close(sys.stdin.fileno())
13252         except SystemExit, e:
13253                 raise # Needed else can't exit
13254         except:
13255                 pass
13256         sys.stdout.flush()
13257
13258         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13259         regen.run()
13260
13261         portage.writemsg_stdout("done!\n")
13262         return regen.returncode
13263
13264 def action_config(settings, trees, myopts, myfiles):
13265         if len(myfiles) != 1:
13266                 print red("!!! config can only take a single package atom at this time\n")
13267                 sys.exit(1)
13268         if not is_valid_package_atom(myfiles[0]):
13269                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13270                         noiselevel=-1)
13271                 portage.writemsg("!!! Please check ebuild(5) for full details.\n", noiselevel=-1)
13272                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n", noiselevel=-1)
13273                 sys.exit(1)
13274         print
13275         try:
13276                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13277         except portage.exception.AmbiguousPackageName, e:
13278                 # Multiple matches thrown from cpv_expand
13279                 pkgs = e.args[0]
13280         if len(pkgs) == 0:
13281                 print "No packages found.\n"
13282                 sys.exit(0)
13283         elif len(pkgs) > 1:
13284                 if "--ask" in myopts:
13285                         options = []
13286                         print "Please select a package to configure:"
13287                         idx = 0
13288                         for pkg in pkgs:
13289                                 idx += 1
13290                                 options.append(str(idx))
13291                                 print options[-1]+") "+pkg
13292                         print "X) Cancel"
13293                         options.append("X")
13294                         idx = userquery("Selection?", options)
13295                         if idx == "X":
13296                                 sys.exit(0)
13297                         pkg = pkgs[int(idx)-1]
13298                 else:
13299                         print "The following packages are available:"
13300                         for pkg in pkgs:
13301                                 print "* "+pkg
13302                         print "\nPlease use a specific atom or the --ask option."
13303                         sys.exit(1)
13304         else:
13305                 pkg = pkgs[0]
13306
13307         print
13308         if "--ask" in myopts:
13309                 if userquery("Ready to configure "+pkg+"?") == "No":
13310                         sys.exit(0)
13311         else:
13312                 print "Configuring %s..." % pkg
13313         print
13314         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13315         mysettings = portage.config(clone=settings)
13316         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13317         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13318         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13319                 mysettings,
13320                 debug=debug, cleanup=True,
13321                 mydbapi=vardb, tree="vartree")
13322         if retval == os.EX_OK:
13323                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13324                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13325         print
13326
13327 def action_info(settings, trees, myopts, myfiles):
13328         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13329                 settings.profile_path, settings["CHOST"],
13330                 trees[settings["ROOT"]]["vartree"].dbapi)
13331         header_width = 65
13332         header_title = "System Settings"
13333         if myfiles:
13334                 print header_width * "="
13335                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13336         print header_width * "="
13337         print "System uname: "+platform.platform(aliased=1)
13338
13339         lastSync = portage.grabfile(os.path.join(
13340                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13341         print "Timestamp of tree:",
13342         if lastSync:
13343                 print lastSync[0]
13344         else:
13345                 print "Unknown"
13346
13347         output=commands.getstatusoutput("distcc --version")
13348         if not output[0]:
13349                 print str(output[1].split("\n",1)[0]),
13350                 if "distcc" in settings.features:
13351                         print "[enabled]"
13352                 else:
13353                         print "[disabled]"
13354
13355         output=commands.getstatusoutput("ccache -V")
13356         if not output[0]:
13357                 print str(output[1].split("\n",1)[0]),
13358                 if "ccache" in settings.features:
13359                         print "[enabled]"
13360                 else:
13361                         print "[disabled]"
13362
13363         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13364                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13365         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13366         myvars  = portage.util.unique_array(myvars)
13367         myvars.sort()
13368
13369         for x in myvars:
13370                 if portage.isvalidatom(x):
13371                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13372                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13373                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13374                         pkgs = []
13375                         for pn, ver, rev in pkg_matches:
13376                                 if rev != "r0":
13377                                         pkgs.append(ver + "-" + rev)
13378                                 else:
13379                                         pkgs.append(ver)
13380                         if pkgs:
13381                                 pkgs = ", ".join(pkgs)
13382                                 print "%-20s %s" % (x+":", pkgs)
13383                 else:
13384                         print "%-20s %s" % (x+":", "[NOT VALID]")
13385
13386         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13387
13388         if "--verbose" in myopts:
13389                 myvars=settings.keys()
13390         else:
13391                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13392                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13393                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13394                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13395
13396                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13397
13398         myvars = portage.util.unique_array(myvars)
13399         unset_vars = []
13400         myvars.sort()
13401         for x in myvars:
13402                 if x in settings:
13403                         if x != "USE":
13404                                 print '%s="%s"' % (x, settings[x])
13405                         else:
13406                                 use = set(settings["USE"].split())
13407                                 use_expand = settings["USE_EXPAND"].split()
13408                                 use_expand.sort()
13409                                 for varname in use_expand:
13410                                         flag_prefix = varname.lower() + "_"
13411                                         for f in list(use):
13412                                                 if f.startswith(flag_prefix):
13413                                                         use.remove(f)
13414                                 use = list(use)
13415                                 use.sort()
13416                                 print 'USE="%s"' % " ".join(use),
13417                                 for varname in use_expand:
13418                                         myval = settings.get(varname)
13419                                         if myval:
13420                                                 print '%s="%s"' % (varname, myval),
13421                                 print
13422                 else:
13423                         unset_vars.append(x)
13424         if unset_vars:
13425                 print "Unset:  "+", ".join(unset_vars)
13426         print
13427
13428         if "--debug" in myopts:
13429                 for x in dir(portage):
13430                         module = getattr(portage, x)
13431                         if "cvs_id_string" in dir(module):
13432                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13433
13434         # See if we can find any packages installed matching the strings
13435         # passed on the command line
13436         mypkgs = []
13437         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13438         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13439         for x in myfiles:
13440                 mypkgs.extend(vardb.match(x))
13441
13442         # If some packages were found...
13443         if mypkgs:
13444                 # Get our global settings (we only print stuff if it varies from
13445                 # the current config)
13446                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13447                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13448                 global_vals = {}
13449                 pkgsettings = portage.config(clone=settings)
13450
13451                 for myvar in mydesiredvars:
13452                         global_vals[myvar] = set(settings.get(myvar, "").split())
13453
13454                 # Loop through each package
13455                 # Only print settings if they differ from global settings
13456                 header_title = "Package Settings"
13457                 print header_width * "="
13458                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13459                 print header_width * "="
13460                 from portage.output import EOutput
13461                 out = EOutput()
13462                 for pkg in mypkgs:
13463                         # Get all package specific variables
13464                         auxvalues = vardb.aux_get(pkg, auxkeys)
13465                         valuesmap = {}
13466                         for i in xrange(len(auxkeys)):
13467                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13468                         diff_values = {}
13469                         for myvar in mydesiredvars:
13470                                 # If the package variable doesn't match the
13471                                 # current global variable, something has changed
13472                                 # so record it in diff_values so we know to print.
13473                                 if valuesmap[myvar] != global_vals[myvar]:
13474                                         diff_values[myvar] = valuesmap[myvar]
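                              # Strip IUSE default markers and only report USE flags that
                              # the ebuild actually declares in IUSE.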
13475                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13476                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13477                         pkgsettings.reset()
13478                         # If a matching ebuild is no longer available in the tree, maybe it
13479                         # would make sense to compare against the flags for the best
13480                         # available version with the same slot?
13481                         mydb = None
13482                         if portdb.cpv_exists(pkg):
13483                                 mydb = portdb
13484                         pkgsettings.setcpv(pkg, mydb=mydb)
13485                         if valuesmap["IUSE"].intersection(
13486                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13487                                 diff_values["USE"] = valuesmap["USE"]
13488                         # If a difference was found, print the info for
13489                         # this package.
13490                         if diff_values:
13491                                 # Print package info
13492                                 print "%s was built with the following:" % pkg
13493                                 for myvar in mydesiredvars + ["USE"]:
13494                                         if myvar in diff_values:
13495                                                 mylist = list(diff_values[myvar])
13496                                                 mylist.sort()
13497                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13498                                 print
13499                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13500                         ebuildpath = vardb.findname(pkg)
13501                         if not ebuildpath or not os.path.exists(ebuildpath):
13502                                 out.ewarn("No ebuild found for '%s'" % pkg)
13503                                 continue
13504                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13505                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13506                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13507                                 tree="vartree")
13508
13509 def action_search(root_config, myopts, myfiles, spinner):
13510         if not myfiles:
13511                 print "emerge: no search terms provided."
13512         else:
13513                 searchinstance = search(root_config,
13514                         spinner, "--searchdesc" in myopts,
13515                         "--quiet" not in myopts, "--usepkg" in myopts,
13516                         "--usepkgonly" in myopts)
13517                 for mysearch in myfiles:
13518                         try:
13519                                 searchinstance.execute(mysearch)
13520                         except re.error, comment:
13521                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13522                                 sys.exit(1)
13523                         searchinstance.output()
13524
13525 def action_depclean(settings, trees, ldpath_mtimes,
13526         myopts, action, myfiles, spinner):
13527         # Remove packages that are neither explicitly merged nor required as a
13528         # dependency of another package. The world file is what counts as explicit.
13529
13530         # Global depclean or prune operations are not very safe when there are
13531         # missing dependencies since it's unknown how badly incomplete
13532         # the dependency graph is, and we might accidentally remove packages
13533         # that should have been pulled into the graph. On the other hand, it's
13534         # relatively safe to ignore missing deps when only asked to remove
13535         # specific packages.
13536         allow_missing_deps = len(myfiles) > 0
13537
13538         msg = []
13539         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13540         msg.append("mistakes. Packages that are part of the world set will always\n")
13541         msg.append("be kept.  They can be manually added to this set with\n")
13542         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13543         msg.append("package.provided (see portage(5)) will be removed by\n")
13544         msg.append("depclean, even if they are part of the world set.\n")
13545         msg.append("\n")
13546         msg.append("As a safety measure, depclean will not remove any packages\n")
13547         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13548         msg.append("consequence, it is often necessary to run %s\n" % \
13549                 good("`emerge --update"))
13550         msg.append(good("--newuse --deep @system @world`") + \
13551                 " prior to depclean.\n")
13552
13553         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13554                 portage.writemsg_stdout("\n")
13555                 for x in msg:
13556                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13557
13558         xterm_titles = "notitles" not in settings.features
13559         myroot = settings["ROOT"]
13560         root_config = trees[myroot]["root_config"]
13561         getSetAtoms = root_config.setconfig.getSetAtoms
13562         vardb = trees[myroot]["vartree"].dbapi
13563
13564         required_set_names = ("system", "world")
13565         required_sets = {}
13566         set_args = []
13567
13568         for s in required_set_names:
13569                 required_sets[s] = InternalPackageSet(
13570                         initial_atoms=getSetAtoms(s))
13571
13572
13573         # When removing packages, use a temporary version of world
13574         # which excludes packages that are intended to be eligible for
13575         # removal.
13576         world_temp_set = required_sets["world"]
13577         system_set = required_sets["system"]
13578
13579         if not system_set or not world_temp_set:
13580
13581                 if not system_set:
13582                         writemsg_level("!!! You have no system list.\n",
13583                                 level=logging.ERROR, noiselevel=-1)
13584
13585                 if not world_temp_set:
13586                         writemsg_level("!!! You have no world file.\n",
13587                                         level=logging.WARNING, noiselevel=-1)
13588
13589                 writemsg_level("!!! Proceeding is likely to " + \
13590                         "break your installation.\n",
13591                         level=logging.WARNING, noiselevel=-1)
13592                 if "--pretend" not in myopts:
13593                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13594
13595         if action == "depclean":
13596                 emergelog(xterm_titles, " >>> depclean")
13597
13598         import textwrap
13599         args_set = InternalPackageSet()
13600         if myfiles:
13601                 for x in myfiles:
13602                         if not is_valid_package_atom(x):
13603                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13604                                         level=logging.ERROR, noiselevel=-1)
13605                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13606                                 return
13607                         try:
13608                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13609                         except portage.exception.AmbiguousPackageName, e:
13610                                 msg = "The short ebuild name \"" + x + \
13611                                         "\" is ambiguous.  Please specify " + \
13612                                         "one of the following " + \
13613                                         "fully-qualified ebuild names instead:"
13614                                 for line in textwrap.wrap(msg, 70):
13615                                         writemsg_level("!!! %s\n" % (line,),
13616                                                 level=logging.ERROR, noiselevel=-1)
13617                                 for i in e[0]:
13618                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13619                                                 level=logging.ERROR, noiselevel=-1)
13620                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13621                                 return
13622                         args_set.add(atom)
13623                 matched_packages = False
13624                 for x in args_set:
13625                         if vardb.match(x):
13626                                 matched_packages = True
13627                                 break
13628                 if not matched_packages:
13629                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13630                                 action)
13631                         return
13632
13633         writemsg_level("\nCalculating dependencies  ")
13634         resolver_params = create_depgraph_params(myopts, "remove")
13635         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13636         vardb = resolver.trees[myroot]["vartree"].dbapi
13637
13638         if action == "depclean":
13639
13640                 if args_set:
13641                         # Pull in everything that's installed but not matched
13642                         # by an argument atom since we don't want to clean any
13643                         # package if something depends on it.
13644
13645                         world_temp_set.clear()
13646                         for pkg in vardb:
13647                                 spinner.update()
13648
13649                                 try:
13650                                         if args_set.findAtomForPackage(pkg) is None:
13651                                                 world_temp_set.add("=" + pkg.cpv)
13652                                                 continue
13653                                 except portage.exception.InvalidDependString, e:
13654                                         show_invalid_depstring_notice(pkg,
13655                                                 pkg.metadata["PROVIDE"], str(e))
13656                                         del e
13657                                         world_temp_set.add("=" + pkg.cpv)
13658                                         continue
13659
13660         elif action == "prune":
13661
13662                 # Pull in everything that's installed since we don't want
13663                 # to prune a package if something depends on it.
13664                 world_temp_set.clear()
13665                 world_temp_set.update(vardb.cp_all())
13666
13667                 if not args_set:
13668
13669                         # Try to prune everything that's slotted.
13670                         for cp in vardb.cp_all():
13671                                 if len(vardb.cp_list(cp)) > 1:
13672                                         args_set.add(cp)
13673
13674                 # Remove atoms from world that match installed packages
13675                 # that are also matched by argument atoms, but do not remove
13676                 # them if they match the highest installed version.
13677                 for pkg in vardb:
13678                         spinner.update()
13679                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13680                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13681                                 raise AssertionError("package expected in matches: " + \
13682                                         "cp = %s, cpv = %s matches = %s" % \
13683                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13684
13685                         highest_version = pkgs_for_cp[-1]
13686                         if pkg == highest_version:
13687                                 # pkg is the highest version
13688                                 world_temp_set.add("=" + pkg.cpv)
13689                                 continue
13690
13691                         if len(pkgs_for_cp) <= 1:
13692                                 raise AssertionError("more packages expected: " + \
13693                                         "cp = %s, cpv = %s matches = %s" % \
13694                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13695
13696                         try:
13697                                 if args_set.findAtomForPackage(pkg) is None:
13698                                         world_temp_set.add("=" + pkg.cpv)
13699                                         continue
13700                         except portage.exception.InvalidDependString, e:
13701                                 show_invalid_depstring_notice(pkg,
13702                                         pkg.metadata["PROVIDE"], str(e))
13703                                 del e
13704                                 world_temp_set.add("=" + pkg.cpv)
13705                                 continue
13706
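              # Inject the system and world sets into the resolver as SetArg nodes so
              # that _complete_graph() pulls in everything those sets require.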
13707         set_args = {}
13708         for s, package_set in required_sets.iteritems():
13709                 set_atom = SETPREFIX + s
13710                 set_arg = SetArg(arg=set_atom, set=package_set,
13711                         root_config=resolver.roots[myroot])
13712                 set_args[s] = set_arg
13713                 for atom in set_arg.set:
13714                         resolver._dep_stack.append(
13715                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13716                         resolver.digraph.add(set_arg, None)
13717
13718         success = resolver._complete_graph()
13719         writemsg_level("\b\b... done!\n")
13720
13721         resolver.display_problems()
13722
13723         if not success:
13724                 return 1
13725
13726         def unresolved_deps():
13727
13728                 unresolvable = set()
13729                 for dep in resolver._initially_unsatisfied_deps:
13730                         if isinstance(dep.parent, Package) and \
13731                                 (dep.priority > UnmergeDepPriority.SOFT):
13732                                 unresolvable.add((dep.atom, dep.parent.cpv))
13733
13734                 if not unresolvable:
13735                         return False
13736
13737                 if unresolvable and not allow_missing_deps:
13738                         prefix = bad(" * ")
13739                         msg = []
13740                         msg.append("Dependencies could not be completely resolved due to")
13741                         msg.append("the following required packages not being installed:")
13742                         msg.append("")
13743                         for atom, parent in unresolvable:
13744                                 msg.append("  %s pulled in by:" % (atom,))
13745                                 msg.append("    %s" % (parent,))
13746                                 msg.append("")
13747                         msg.append("Have you forgotten to run " + \
13748                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13749                         msg.append(("to %s? It may be necessary to manually " + \
13750                                 "uninstall packages that no longer") % action)
13751                         msg.append("exist in the portage tree since " + \
13752                                 "it may not be possible to satisfy their")
13753                         msg.append("dependencies.  Also, be aware of " + \
13754                                 "the --with-bdeps option that is documented")
13755                         msg.append("in " + good("`man emerge`") + ".")
13756                         if action == "prune":
13757                                 msg.append("")
13758                                 msg.append("If you would like to ignore " + \
13759                                         "dependencies then use %s." % good("--nodeps"))
13760                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13761                                 level=logging.ERROR, noiselevel=-1)
13762                         return True
13763                 return False
13764
13765         if unresolved_deps():
13766                 return 1
13767
13768         graph = resolver.digraph.copy()
13769         required_pkgs_total = 0
13770         for node in graph:
13771                 if isinstance(node, Package):
13772                         required_pkgs_total += 1
13773
13774         def show_parents(child_node):
13775                 parent_nodes = graph.parent_nodes(child_node)
13776                 if not parent_nodes:
13777                         # With --prune, the highest version can be pulled in without any
13778                         # real parent since all installed packages are pulled in.  In that
13779                         # case there's nothing to show here.
13780                         return
13781                 parent_strs = []
13782                 for node in parent_nodes:
13783                         parent_strs.append(str(getattr(node, "cpv", node)))
13784                 parent_strs.sort()
13785                 msg = []
13786                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13787                 for parent_str in parent_strs:
13788                         msg.append("    %s\n" % (parent_str,))
13789                 msg.append("\n")
13790                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13791
13792         def cmp_pkg_cpv(pkg1, pkg2):
13793                 """Sort Package instances by cpv."""
13794                 if pkg1.cpv > pkg2.cpv:
13795                         return 1
13796                 elif pkg1.cpv == pkg2.cpv:
13797                         return 0
13798                 else:
13799                         return -1
13800
13801         def create_cleanlist():
13802                 pkgs_to_remove = []
13803
13804                 if action == "depclean":
13805                         if args_set:
13806
13807                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13808                                         arg_atom = None
13809                                         try:
13810                                                 arg_atom = args_set.findAtomForPackage(pkg)
13811                                         except portage.exception.InvalidDependString:
13812                                                 # this error has already been displayed by now
13813                                                 continue
13814
13815                                         if arg_atom:
13816                                                 if pkg not in graph:
13817                                                         pkgs_to_remove.append(pkg)
13818                                                 elif "--verbose" in myopts:
13819                                                         show_parents(pkg)
13820
13821                         else:
13822                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13823                                         if pkg not in graph:
13824                                                 pkgs_to_remove.append(pkg)
13825                                         elif "--verbose" in myopts:
13826                                                 show_parents(pkg)
13827
13828                 elif action == "prune":
13829                         # Prune really uses all installed instead of world. It's not
13830                         # a real reverse dependency so don't display it as such.
13831                         graph.remove(set_args["world"])
13832
13833                         for atom in args_set:
13834                                 for pkg in vardb.match_pkgs(atom):
13835                                         if pkg not in graph:
13836                                                 pkgs_to_remove.append(pkg)
13837                                         elif "--verbose" in myopts:
13838                                                 show_parents(pkg)
13839
13840                 if not pkgs_to_remove:
13841                         writemsg_level(
13842                                 ">>> No packages selected for removal by %s\n" % action)
13843                         if "--verbose" not in myopts:
13844                                 writemsg_level(
13845                                         ">>> To see reverse dependencies, use %s\n" % \
13846                                                 good("--verbose"))
13847                         if action == "prune":
13848                                 writemsg_level(
13849                                         ">>> To ignore dependencies, use %s\n" % \
13850                                                 good("--nodeps"))
13851
13852                 return pkgs_to_remove
13853
13854         cleanlist = create_cleanlist()
13855
13856         if len(cleanlist):
13857                 clean_set = set(cleanlist)
13858
13859                 # Check if any of these packages are the sole providers of libraries
13860                 # with consumers that have not been selected for removal. If so, these
13861                 # packages and any dependencies need to be added to the graph.
13862                 real_vardb = trees[myroot]["vartree"].dbapi
13863                 linkmap = real_vardb.linkmap
13864                 liblist = linkmap.listLibraryObjects()
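                      # Memoize linkmap lookups (consumers, providers, sonames), since
                      # the same libraries and files come up repeatedly.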
13865                 consumer_cache = {}
13866                 provider_cache = {}
13867                 soname_cache = {}
13868                 consumer_map = {}
13869
13870                 writemsg_level(">>> Checking for lib consumers...\n")
13871
13872                 for pkg in cleanlist:
13873                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13874                         provided_libs = set()
13875
13876                         for lib in liblist:
13877                                 if pkg_dblink.isowner(lib, myroot):
13878                                         provided_libs.add(lib)
13879
13880                         if not provided_libs:
13881                                 continue
13882
13883                         consumers = {}
13884                         for lib in provided_libs:
13885                                 lib_consumers = consumer_cache.get(lib)
13886                                 if lib_consumers is None:
13887                                         lib_consumers = linkmap.findConsumers(lib)
13888                                         consumer_cache[lib] = lib_consumers
13889                                 if lib_consumers:
13890                                         consumers[lib] = lib_consumers
13891
13892                         if not consumers:
13893                                 continue
13894
13895                         for lib, lib_consumers in consumers.items():
13896                                 for consumer_file in list(lib_consumers):
13897                                         if pkg_dblink.isowner(consumer_file, myroot):
13898                                                 lib_consumers.remove(consumer_file)
13899                                 if not lib_consumers:
13900                                         del consumers[lib]
13901
13902                         if not consumers:
13903                                 continue
13904
13905                         for lib, lib_consumers in consumers.iteritems():
13906
13907                                 soname = soname_cache.get(lib)
13908                                 if soname is None:
13909                                         soname = linkmap.getSoname(lib)
13910                                         soname_cache[lib] = soname
13911
13912                                 consumer_providers = []
13913                                 for lib_consumer in lib_consumers:
13914                                         providers = provider_cache.get(lib_consumer)
13915                                         if providers is None:
13916                                                 providers = linkmap.findProviders(lib_consumer)
13917                                                 provider_cache[lib_consumer] = providers
13918                                         if soname not in providers:
13919                                                 # Why does this happen?
13920                                                 continue
13921                                         consumer_providers.append(
13922                                                 (lib_consumer, providers[soname]))
13923
13924                                 consumers[lib] = consumer_providers
13925
13926                         consumer_map[pkg] = consumers
13927
13928                 if consumer_map:
13929
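                              # Map each consumer and provider file back to the installed
                              # package that owns it.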
13930                         search_files = set()
13931                         for consumers in consumer_map.itervalues():
13932                                 for lib, consumer_providers in consumers.iteritems():
13933                                         for lib_consumer, providers in consumer_providers:
13934                                                 search_files.add(lib_consumer)
13935                                                 search_files.update(providers)
13936
13937                         writemsg_level(">>> Assigning files to packages...\n")
13938                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13939
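                              # Discard consumers that are themselves scheduled for removal,
                              # and ignore libraries that still have an alternative provider
                              # outside of the clean set.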
13940                         for pkg, consumers in consumer_map.items():
13941                                 for lib, consumer_providers in consumers.items():
13942                                         lib_consumers = set()
13943
13944                                         for lib_consumer, providers in consumer_providers:
13945                                                 owner_set = file_owners.get(lib_consumer)
13946                                                 provider_dblinks = set()
13947                                                 provider_pkgs = set()
13948
13949                                                 if len(providers) > 1:
13950                                                         for provider in providers:
13951                                                                 provider_set = file_owners.get(provider)
13952                                                                 if provider_set is not None:
13953                                                                         provider_dblinks.update(provider_set)
13954
13955                                                 if len(provider_dblinks) > 1:
13956                                                         for provider_dblink in provider_dblinks:
13957                                                                 pkg_key = ("installed", myroot,
13958                                                                         provider_dblink.mycpv, "nomerge")
13959                                                                 if pkg_key not in clean_set:
13960                                                                         provider_pkgs.add(vardb.get(pkg_key))
13961
13962                                                 if provider_pkgs:
13963                                                         continue
13964
13965                                                 if owner_set is not None:
13966                                                         lib_consumers.update(owner_set)
13967
13968                                         for consumer_dblink in list(lib_consumers):
13969                                                 if ("installed", myroot, consumer_dblink.mycpv,
13970                                                         "nomerge") in clean_set:
13971                                                         lib_consumers.remove(consumer_dblink)
13972                                                         continue
13973
13974                                         if lib_consumers:
13975                                                 consumers[lib] = lib_consumers
13976                                         else:
13977                                                 del consumers[lib]
13978                                 if not consumers:
13979                                         del consumer_map[pkg]
13980
13981                 if consumer_map:
13982                         # TODO: Implement a package set for rebuilding consumer packages.
13983
13984                         msg = "In order to avoid breakage of link level " + \
13985                                 "dependencies, one or more packages will not be removed. " + \
13986                                 "This can be solved by rebuilding " + \
13987                                 "the packages that pulled them in."
13988
13989                         prefix = bad(" * ")
13990                         from textwrap import wrap
13991                         writemsg_level("".join(prefix + "%s\n" % line for \
13992                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13993
13994                         msg = []
13995                         for pkg, consumers in consumer_map.iteritems():
13996                                 unique_consumers = set(chain(*consumers.values()))
13997                                 unique_consumers = sorted(consumer.mycpv \
13998                                         for consumer in unique_consumers)
13999                                 msg.append("")
14000                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14001                                 for consumer in unique_consumers:
14002                                         msg.append("    %s" % (consumer,))
14003                         msg.append("")
14004                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14005                                 level=logging.WARNING, noiselevel=-1)
14006
14007                         # Add lib providers to the graph as children of lib consumers,
14008                         # and also add any dependencies pulled in by the provider.
14009                         writemsg_level(">>> Adding lib providers to graph...\n")
14010
14011                         for pkg, consumers in consumer_map.iteritems():
14012                                 for consumer_dblink in set(chain(*consumers.values())):
14013                                         consumer_pkg = vardb.get(("installed", myroot,
14014                                                 consumer_dblink.mycpv, "nomerge"))
14015                                         if not resolver._add_pkg(pkg,
14016                                                 Dependency(parent=consumer_pkg,
14017                                                 priority=UnmergeDepPriority(runtime=True),
14018                                                 root=pkg.root)):
14019                                                 resolver.display_problems()
14020                                                 return 1
14021
14022                         writemsg_level("\nCalculating dependencies  ")
14023                         success = resolver._complete_graph()
14024                         writemsg_level("\b\b... done!\n")
14025                         resolver.display_problems()
14026                         if not success:
14027                                 return 1
14028                         if unresolved_deps():
14029                                 return 1
14030
14031                         graph = resolver.digraph.copy()
14032                         required_pkgs_total = 0
14033                         for node in graph:
14034                                 if isinstance(node, Package):
14035                                         required_pkgs_total += 1
14036                         cleanlist = create_cleanlist()
14037                         if not cleanlist:
14038                                 return 0
14039                         clean_set = set(cleanlist)
14040
14041                 # Use a topological sort to create an unmerge order such that
14042                 # each package is unmerged before its dependencies. This is
14043                 # necessary to avoid breaking things that may need to run
14044                 # during pkg_prerm or pkg_postrm phases.
14045
14046                 # Create a new graph to account for dependencies between the
14047                 # packages being unmerged.
14048                 graph = digraph()
14049                 del cleanlist[:]
14050
14051                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14052                 runtime = UnmergeDepPriority(runtime=True)
14053                 runtime_post = UnmergeDepPriority(runtime_post=True)
14054                 buildtime = UnmergeDepPriority(buildtime=True)
14055                 priority_map = {
14056                         "RDEPEND": runtime,
14057                         "PDEPEND": runtime_post,
14058                         "DEPEND": buildtime,
14059                 }
14060
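                      # For each package being removed, add edges to the other clean-set
                      # packages it depends on, so that the unmerge order honors the
                      # DEPEND/RDEPEND/PDEPEND relationships among them.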
14061                 for node in clean_set:
14062                         graph.add(node, None)
14063                         mydeps = []
14064                         node_use = node.metadata["USE"].split()
14065                         for dep_type in dep_keys:
14066                                 depstr = node.metadata[dep_type]
14067                                 if not depstr:
14068                                         continue
14069                                 try:
14070                                         portage.dep._dep_check_strict = False
14071                                         success, atoms = portage.dep_check(depstr, None, settings,
14072                                                 myuse=node_use, trees=resolver._graph_trees,
14073                                                 myroot=myroot)
14074                                 finally:
14075                                         portage.dep._dep_check_strict = True
14076                                 if not success:
14077                                         # Ignore invalid deps of packages that will
14078                                         # be uninstalled anyway.
14079                                         continue
14080
14081                                 priority = priority_map[dep_type]
14082                                 for atom in atoms:
14083                                         if not isinstance(atom, portage.dep.Atom):
14084                                                 # Ignore invalid atoms returned from dep_check().
14085                                                 continue
14086                                         if atom.blocker:
14087                                                 continue
14088                                         matches = vardb.match_pkgs(atom)
14089                                         if not matches:
14090                                                 continue
14091                                         for child_node in matches:
14092                                                 if child_node in clean_set:
14093                                                         graph.add(child_node, node, priority=priority)
14094
14095                 ordered = True
14096                 if len(graph.order) == len(graph.root_nodes()):
14097                         # If there are no dependencies between packages
14098                         # let unmerge() group them by cat/pn.
14099                         ordered = False
14100                         cleanlist = [pkg.cpv for pkg in graph.order]
14101                 else:
14102                         # Order nodes from lowest to highest overall reference count for
14103                         # optimal root node selection.
14104                         node_refcounts = {}
14105                         for node in graph.order:
14106                                 node_refcounts[node] = len(graph.parent_nodes(node))
14107                         def cmp_reference_count(node1, node2):
14108                                 return node_refcounts[node1] - node_refcounts[node2]
14109                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14110
14111                         ignore_priority_range = [None]
14112                         ignore_priority_range.extend(
14113                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14114                         while not graph.empty():
14115                                 for ignore_priority in ignore_priority_range:
14116                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14117                                         if nodes:
14118                                                 break
14119                                 if not nodes:
14120                                         raise AssertionError("no root nodes")
14121                                 if ignore_priority is not None:
14122                                         # Some deps have been dropped due to circular dependencies,
14123                                         # so only pop one node in order to minimize the number that
14124                                         # are dropped.
14125                                         del nodes[1:]
14126                                 for node in nodes:
14127                                         graph.remove(node)
14128                                         cleanlist.append(node.cpv)
14129
14130                 unmerge(root_config, myopts, "unmerge", cleanlist,
14131                         ldpath_mtimes, ordered=ordered)
14132
14133         if action == "prune":
14134                 return
14135
14136         if not cleanlist and "--quiet" in myopts:
14137                 return
14138
14139         print "Packages installed:   "+str(len(vardb.cpv_all()))
14140         print "Packages in world:    " + \
14141                 str(len(root_config.sets["world"].getAtoms()))
14142         print "Packages in system:   " + \
14143                 str(len(root_config.sets["system"].getAtoms()))
14144         print "Required packages:    "+str(required_pkgs_total)
14145         if "--pretend" in myopts:
14146                 print "Number to remove:     "+str(len(cleanlist))
14147         else:
14148                 print "Number removed:       "+str(len(cleanlist))
14149
14150 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14151         """
14152         Construct a depgraph for the given resume list. This will raise
14153         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14154         @rtype: tuple
14155         @returns: (success, depgraph, dropped_tasks)
14156         """
14157         skip_masked = True
14158         skip_unsatisfied = True
14159         mergelist = mtimedb["resume"]["mergelist"]
14160         dropped_tasks = set()
14161         while True:
14162                 mydepgraph = depgraph(settings, trees,
14163                         myopts, myparams, spinner)
14164                 try:
14165                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14166                                 skip_masked=skip_masked)
14167                 except depgraph.UnsatisfiedResumeDep, e:
14168                         if not skip_unsatisfied:
14169                                 raise
14170
14171                         graph = mydepgraph.digraph
14172                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14173                                 for dep in e.value)
14174                         traversed_nodes = set()
14175                         unsatisfied_stack = list(unsatisfied_parents)
14176                         while unsatisfied_stack:
14177                                 pkg = unsatisfied_stack.pop()
14178                                 if pkg in traversed_nodes:
14179                                         continue
14180                                 traversed_nodes.add(pkg)
14181
14182                                 # If this package was pulled in by a parent
14183                                 # package scheduled for merge, removing this
14184                                 # package may cause the parent package's
14185                                 # dependency to become unsatisfied.
14186                                 for parent_node in graph.parent_nodes(pkg):
14187                                         if not isinstance(parent_node, Package) \
14188                                                 or parent_node.operation not in ("merge", "nomerge"):
14189                                                 continue
14190                                         unsatisfied = \
14191                                                 graph.child_nodes(parent_node,
14192                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14193                                         if pkg in unsatisfied:
14194                                                 unsatisfied_parents[parent_node] = parent_node
14195                                                 unsatisfied_stack.append(parent_node)
14196
14197                         pruned_mergelist = []
14198                         for x in mergelist:
14199                                 if isinstance(x, list) and \
14200                                         tuple(x) not in unsatisfied_parents:
14201                                         pruned_mergelist.append(x)
14202
14203                         # If the mergelist doesn't shrink then this loop is infinite.
14204                         if len(pruned_mergelist) == len(mergelist):
14205                                 # This happens if a package can't be dropped because
14206                                 # it's already installed, but it has unsatisfied PDEPEND.
14207                                 raise
14208                         mergelist[:] = pruned_mergelist
14209
14210                         # Exclude installed packages that have been removed from the graph due
14211                         # to failure to build/install runtime dependencies after the dependent
14212                         # package has already been installed.
14213                         dropped_tasks.update(pkg for pkg in \
14214                                 unsatisfied_parents if pkg.operation != "nomerge")
14215                         mydepgraph.break_refs(unsatisfied_parents)
14216
14217                         del e, graph, traversed_nodes, \
14218                                 unsatisfied_parents, unsatisfied_stack
14219                         continue
14220                 else:
14221                         break
14222         return (success, mydepgraph, dropped_tasks)
14223
14224 def action_build(settings, trees, mtimedb,
14225         myopts, myaction, myfiles, spinner):
14226
14227         # validate the state of the resume data
14228         # so that we can make assumptions later.
14229         for k in ("resume", "resume_backup"):
14230                 if k not in mtimedb:
14231                         continue
14232                 resume_data = mtimedb[k]
14233                 if not isinstance(resume_data, dict):
14234                         del mtimedb[k]
14235                         continue
14236                 mergelist = resume_data.get("mergelist")
14237                 if not isinstance(mergelist, list):
14238                         del mtimedb[k]
14239                         continue
14240                 for x in mergelist:
14241                         if not (isinstance(x, list) and len(x) == 4):
14242                                 continue
14243                         pkg_type, pkg_root, pkg_key, pkg_action = x
14244                         if pkg_root not in trees:
14245                                 # Current $ROOT setting differs,
14246                                 # so the list must be stale.
14247                                 mergelist = None
14248                                 break
14249                 if not mergelist:
14250                         del mtimedb[k]
14251                         continue
14252                 resume_opts = resume_data.get("myopts")
14253                 if not isinstance(resume_opts, (dict, list)):
14254                         del mtimedb[k]
14255                         continue
14256                 favorites = resume_data.get("favorites")
14257                 if not isinstance(favorites, list):
14258                         del mtimedb[k]
14259                         continue
14260
14261         resume = False
14262         if "--resume" in myopts and \
14263                 ("resume" in mtimedb or
14264                 "resume_backup" in mtimedb):
14265                 resume = True
14266                 if "resume" not in mtimedb:
14267                         mtimedb["resume"] = mtimedb["resume_backup"]
14268                         del mtimedb["resume_backup"]
14269                         mtimedb.commit()
14270                 # Resume "myopts" may be stored as a list by older portage versions.
14271                 resume_opts = mtimedb["resume"].get("myopts", [])
14272                 if isinstance(resume_opts, list):
14273                         resume_opts = dict((k,True) for k in resume_opts)
14274                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14275                         resume_opts.pop(opt, None)
14276                 myopts.update(resume_opts)
14277
14278                 if "--debug" in myopts:
14279                         writemsg_level("myopts %s\n" % (myopts,))
14280
14281                 # Adjust config according to options of the command being resumed.
14282                 for myroot in trees:
14283                         mysettings = trees[myroot]["vartree"].settings
14284                         mysettings.unlock()
14285                         adjust_config(myopts, mysettings)
14286                         mysettings.lock()
14287                         del myroot, mysettings
14288
14289         ldpath_mtimes = mtimedb["ldpath"]
14290         favorites=[]
14291         merge_count = 0
14292         buildpkgonly = "--buildpkgonly" in myopts
14293         pretend = "--pretend" in myopts
14294         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14295         ask = "--ask" in myopts
14296         nodeps = "--nodeps" in myopts
14297         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14298         tree = "--tree" in myopts
14299         if nodeps and tree:
14300                 tree = False
14301                 del myopts["--tree"]
14302                 portage.writemsg(colorize("WARN", " * ") + \
14303                         "--tree is broken with --nodeps. Disabling...\n")
14304         debug = "--debug" in myopts
14305         verbose = "--verbose" in myopts
14306         quiet = "--quiet" in myopts
14307         if pretend or fetchonly:
14308                 # make the mtimedb readonly
14309                 mtimedb.filename = None
14310         if '--digest' in myopts or 'digest' in settings.features:
14311                 if '--digest' in myopts:
14312                         msg = "The --digest option"
14313                 else:
14314                         msg = "The FEATURES=digest setting"
14315
14316                 msg += " can prevent corruption from being" + \
14317                         " noticed. The `repoman manifest` command is the preferred" + \
14318                         " way to generate manifests and it is capable of doing an" + \
14319                         " entire repository or category at once."
14320                 prefix = bad(" * ")
14321                 writemsg(prefix + "\n")
14322                 from textwrap import wrap
14323                 for line in wrap(msg, 72):
14324                         writemsg("%s%s\n" % (prefix, line))
14325                 writemsg(prefix + "\n")
14326
14327         if "--quiet" not in myopts and \
14328                 ("--pretend" in myopts or "--ask" in myopts or \
14329                 "--tree" in myopts or "--verbose" in myopts):
14330                 action = ""
14331                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14332                         action = "fetched"
14333                 elif "--buildpkgonly" in myopts:
14334                         action = "built"
14335                 else:
14336                         action = "merged"
14337                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14338                         print
14339                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14340                         print
14341                 else:
14342                         print
14343                         print darkgreen("These are the packages that would be %s, in order:") % action
14344                         print
14345
14346         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14347         if not show_spinner:
14348                 spinner.update = spinner.update_quiet
14349
14350         if resume:
14351                 favorites = mtimedb["resume"].get("favorites")
14352                 if not isinstance(favorites, list):
14353                         favorites = []
14354
14355                 if show_spinner:
14356                         print "Calculating dependencies  ",
14357                 myparams = create_depgraph_params(myopts, myaction)
14358
14359                 resume_data = mtimedb["resume"]
14360                 mergelist = resume_data["mergelist"]
14361                 if mergelist and "--skipfirst" in myopts:
14362                         for i, task in enumerate(mergelist):
14363                                 if isinstance(task, list) and \
14364                                         task and task[-1] == "merge":
14365                                         del mergelist[i]
14366                                         break
14367
14368                 success = False
14369                 mydepgraph = None
14370                 try:
14371                         success, mydepgraph, dropped_tasks = resume_depgraph(
14372                                 settings, trees, mtimedb, myopts, myparams, spinner)
14373                 except (portage.exception.PackageNotFound,
14374                         depgraph.UnsatisfiedResumeDep), e:
14375                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14376                                 mydepgraph = e.depgraph
14377                         if show_spinner:
14378                                 print
14379                         from textwrap import wrap
14380                         from portage.output import EOutput
14381                         out = EOutput()
14382
14383                         resume_data = mtimedb["resume"]
14384                         mergelist = resume_data.get("mergelist")
14385                         if not isinstance(mergelist, list):
14386                                 mergelist = []
14387                         if mergelist and (debug or (verbose and not quiet)):
14388                                 out.eerror("Invalid resume list:")
14389                                 out.eerror("")
14390                                 indent = "  "
14391                                 for task in mergelist:
14392                                         if isinstance(task, list):
14393                                                 out.eerror(indent + str(tuple(task)))
14394                                 out.eerror("")
14395
14396                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14397                                 out.eerror("One or more packages are either masked or " + \
14398                                         "have missing dependencies:")
14399                                 out.eerror("")
14400                                 indent = "  "
14401                                 for dep in e.value:
14402                                         if dep.atom is None:
14403                                                 out.eerror(indent + "Masked package:")
14404                                                 out.eerror(2 * indent + str(dep.parent))
14405                                                 out.eerror("")
14406                                         else:
14407                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14408                                                 out.eerror(2 * indent + str(dep.parent))
14409                                                 out.eerror("")
14410                                 msg = "The resume list contains packages " + \
14411                                         "that are either masked or have " + \
14412                                         "unsatisfied dependencies. " + \
14413                                         "Please restart/continue " + \
14414                                         "the operation manually, or use --skipfirst " + \
14415                                         "to skip the first package in the list and " + \
14416                                         "any other packages that may be " + \
14417                                         "masked or have missing dependencies."
14418                                 for line in wrap(msg, 72):
14419                                         out.eerror(line)
14420                         elif isinstance(e, portage.exception.PackageNotFound):
14421                                 out.eerror("An expected package is " + \
14422                                         "not available: %s" % str(e))
14423                                 out.eerror("")
14424                                 msg = "The resume list contains one or more " + \
14425                                         "packages that are no longer " + \
14426                                         "available. Please restart/continue " + \
14427                                         "the operation manually."
14428                                 for line in wrap(msg, 72):
14429                                         out.eerror(line)
14430                 else:
14431                         if show_spinner:
14432                                 print "\b\b... done!"
14433
14434                 if success:
14435                         if dropped_tasks:
14436                                 portage.writemsg("!!! One or more packages have been " + \
14437                                         "dropped due to\n" + \
14438                                         "!!! masking or unsatisfied dependencies:\n\n",
14439                                         noiselevel=-1)
14440                                 for task in dropped_tasks:
14441                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14442                                 portage.writemsg("\n", noiselevel=-1)
14443                         del dropped_tasks
14444                 else:
14445                         if mydepgraph is not None:
14446                                 mydepgraph.display_problems()
14447                         if not (ask or pretend):
14448                                 # delete the current list and also the backup
14449                                 # since it's probably stale too.
14450                                 for k in ("resume", "resume_backup"):
14451                                         mtimedb.pop(k, None)
14452                                 mtimedb.commit()
14453
14454                         return 1
14455         else:
14456                 if ("--resume" in myopts):
14457                         print darkgreen("emerge: It seems we have nothing to resume...")
14458                         return os.EX_OK
14459
14460                 myparams = create_depgraph_params(myopts, myaction)
14461                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14462                         print "Calculating dependencies  ",
14463                         sys.stdout.flush()
14464                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14465                 try:
14466                         retval, favorites = mydepgraph.select_files(myfiles)
14467                 except portage.exception.PackageNotFound, e:
14468                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14469                         return 1
14470                 except portage.exception.PackageSetNotFound, e:
14471                         root_config = trees[settings["ROOT"]]["root_config"]
14472                         display_missing_pkg_set(root_config, e.value)
14473                         return 1
14474                 if show_spinner:
14475                         print "\b\b... done!"
14476                 if not retval:
14477                         mydepgraph.display_problems()
14478                         return 1
14479
14480         if "--pretend" not in myopts and \
14481                 ("--ask" in myopts or "--tree" in myopts or \
14482                 "--verbose" in myopts) and \
14483                 not ("--quiet" in myopts and "--ask" not in myopts):
14484                 if "--resume" in myopts:
14485                         mymergelist = mydepgraph.altlist()
14486                         if len(mymergelist) == 0:
14487                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14488                                 return os.EX_OK
14489                         favorites = mtimedb["resume"]["favorites"]
14490                         retval = mydepgraph.display(
14491                                 mydepgraph.altlist(reversed=tree),
14492                                 favorites=favorites)
14493                         mydepgraph.display_problems()
14494                         if retval != os.EX_OK:
14495                                 return retval
14496                         prompt="Would you like to resume merging these packages?"
14497                 else:
14498                         retval = mydepgraph.display(
14499                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14500                                 favorites=favorites)
14501                         mydepgraph.display_problems()
14502                         if retval != os.EX_OK:
14503                                 return retval
14504                         mergecount=0
14505                         for x in mydepgraph.altlist():
14506                                 if isinstance(x, Package) and x.operation == "merge":
14507                                         mergecount += 1
14508
14509                         if mergecount==0:
14510                                 sets = trees[settings["ROOT"]]["root_config"].sets
14511                                 world_candidates = None
14512                                 if "--noreplace" in myopts and \
14513                                         not oneshot and favorites:
14514                                         # Sets that are not world candidates are filtered
14515                                         # out here since the favorites list needs to be
14516                                         # complete for depgraph.loadResumeCommand() to
14517                                         # operate correctly.
14518                                         world_candidates = [x for x in favorites \
14519                                                 if not (x.startswith(SETPREFIX) and \
14520                                                 not sets[x[1:]].world_candidate)]
14521                                 if "--noreplace" in myopts and \
14522                                         not oneshot and world_candidates:
14523                                         print
14524                                         for x in world_candidates:
14525                                                 print " %s %s" % (good("*"), x)
14526                                         prompt="Would you like to add these packages to your world favorites?"
14527                                 elif "yes" == settings.get("AUTOCLEAN"):
14528                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14529                                 else:
14530                                         print
14531                                         print "Nothing to merge; quitting."
14532                                         print
14533                                         return os.EX_OK
14534                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14535                                 prompt="Would you like to fetch the source files for these packages?"
14536                         else:
14537                                 prompt="Would you like to merge these packages?"
14538                 print
14539                 if "--ask" in myopts and userquery(prompt) == "No":
14540                         print
14541                         print "Quitting."
14542                         print
14543                         return os.EX_OK
14544                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14545                 myopts.pop("--ask", None)
14546
14547         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14548                 if ("--resume" in myopts):
14549                         mymergelist = mydepgraph.altlist()
14550                         if len(mymergelist) == 0:
14551                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14552                                 return os.EX_OK
14553                         favorites = mtimedb["resume"]["favorites"]
14554                         retval = mydepgraph.display(
14555                                 mydepgraph.altlist(reversed=tree),
14556                                 favorites=favorites)
14557                         mydepgraph.display_problems()
14558                         if retval != os.EX_OK:
14559                                 return retval
14560                 else:
14561                         retval = mydepgraph.display(
14562                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14563                                 favorites=favorites)
14564                         mydepgraph.display_problems()
14565                         if retval != os.EX_OK:
14566                                 return retval
14567                         if "--buildpkgonly" in myopts:
14568                                 graph_copy = mydepgraph.digraph.clone()
14569                                 removed_nodes = set()
14570                                 for node in graph_copy:
14571                                         if not isinstance(node, Package) or \
14572                                                 node.operation == "nomerge":
14573                                                 removed_nodes.add(node)
14574                                 graph_copy.difference_update(removed_nodes)
14575                                 if not graph_copy.hasallzeros(ignore_priority = \
14576                                         DepPrioritySatisfiedRange.ignore_medium):
14577                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14578                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14579                                         return 1
14580         else:
14581                 if "--buildpkgonly" in myopts:
14582                         graph_copy = mydepgraph.digraph.clone()
14583                         removed_nodes = set()
14584                         for node in graph_copy:
14585                                 if not isinstance(node, Package) or \
14586                                         node.operation == "nomerge":
14587                                         removed_nodes.add(node)
14588                         graph_copy.difference_update(removed_nodes)
14589                         if not graph_copy.hasallzeros(ignore_priority = \
14590                                 DepPrioritySatisfiedRange.ignore_medium):
14591                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14592                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14593                                 return 1
14594
14595                 if ("--resume" in myopts):
14596                         favorites=mtimedb["resume"]["favorites"]
14597                         mymergelist = mydepgraph.altlist()
14598                         mydepgraph.break_refs(mymergelist)
14599                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14600                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14601                         del mydepgraph, mymergelist
14602                         clear_caches(trees)
14603
14604                         retval = mergetask.merge()
14605                         merge_count = mergetask.curval
14606                 else:
14607                         if "resume" in mtimedb and \
14608                         "mergelist" in mtimedb["resume"] and \
14609                         len(mtimedb["resume"]["mergelist"]) > 1:
14610                                 mtimedb["resume_backup"] = mtimedb["resume"]
14611                                 del mtimedb["resume"]
14612                                 mtimedb.commit()
14613                         mtimedb["resume"]={}
14614                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14615                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14616                         # a list type for options.
14617                         mtimedb["resume"]["myopts"] = myopts.copy()
14618
14619                         # Convert Atom instances to plain str.
14620                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14621
14622                         pkglist = mydepgraph.altlist()
14623                         mydepgraph.saveNomergeFavorites()
14624                         mydepgraph.break_refs(pkglist)
14625                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14626                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14627                         del mydepgraph, pkglist
14628                         clear_caches(trees)
14629
14630                         retval = mergetask.merge()
14631                         merge_count = mergetask.curval
14632
14633                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14634                         if "yes" == settings.get("AUTOCLEAN"):
14635                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14636                                 unmerge(trees[settings["ROOT"]]["root_config"],
14637                                         myopts, "clean", [],
14638                                         ldpath_mtimes, autoclean=1)
14639                         else:
14640                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14641                                         + " AUTOCLEAN is disabled.  This can cause serious"
14642                                         + " problems due to overlapping packages.\n")
14643                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14644
14645                 return retval
14646
14647 def multiple_actions(action1, action2):
14648         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14649         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14650         sys.exit(1)
14651
14652 def insert_optional_args(args):
14653         """
14654         Parse optional arguments and insert a value if one has
14655         not been provided. This is done before feeding the args
14656         to the optparse parser since that parser does not support
14657         this feature natively.
14658         """
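              # Illustrative examples of the -j/--jobs rewriting performed below:
              #   ["-j4", "--quiet"] -> ["--jobs", "4", "--quiet"]
              #   ["-j", "--quiet"]  -> ["--jobs", "True", "--quiet"]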
14659
14660         new_args = []
14661         jobs_opts = ("-j", "--jobs")
14662         arg_stack = args[:]
14663         arg_stack.reverse()
14664         while arg_stack:
14665                 arg = arg_stack.pop()
14666
14667                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14668                 if not (short_job_opt or arg in jobs_opts):
14669                         new_args.append(arg)
14670                         continue
14671
14672                 # Insert an explicit value for --jobs so that optparse
14673                 # always sees the option together with an argument.
14674
14675                 new_args.append("--jobs")
14676                 job_count = None
14677                 saved_opts = None
14678                 if short_job_opt and len(arg) > 2:
14679                         if arg[:2] == "-j":
14680                                 try:
14681                                         job_count = int(arg[2:])
14682                                 except ValueError:
14683                                         saved_opts = arg[2:]
14684                         else:
14685                                 job_count = "True"
14686                                 saved_opts = arg[1:].replace("j", "")
14687
14688                 if job_count is None and arg_stack:
14689                         try:
14690                                 job_count = int(arg_stack[-1])
14691                         except ValueError:
14692                                 pass
14693                         else:
14694                                 # Discard the job count from the stack
14695                                 # since we're consuming it here.
14696                                 arg_stack.pop()
14697
14698                 if job_count is None:
14699                         # unlimited number of jobs
14700                         new_args.append("True")
14701                 else:
14702                         new_args.append(str(job_count))
14703
14704                 if saved_opts is not None:
14705                         new_args.append("-" + saved_opts)
14706
14707         return new_args
14708
14709 def parse_opts(tmpcmdline, silent=False):
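              """
              Parse the emerge command line and return (myaction, myopts,
              myfiles): the requested action (if any), a dict mapping option
              names to True or to their parsed argument values, and the
              remaining arguments.
              """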
14710         myaction=None
14711         myopts = {}
14712         myfiles=[]
14713
14714         global actions, options, shortmapping
14715
14716         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14717         argument_options = {
14718                 "--config-root": {
14719                         "help":"specify the location for portage configuration files",
14720                         "action":"store"
14721                 },
14722                 "--color": {
14723                         "help":"enable or disable color output",
14724                         "type":"choice",
14725                         "choices":("y", "n")
14726                 },
14727
14728                 "--jobs": {
14729
14730                         "help"   : "Specifies the number of packages to build " + \
14731                                 "simultaneously.",
14732
14733                         "action" : "store"
14734                 },
14735
14736                 "--load-average": {
14737
14738                         "help"   :"Specifies that no new builds should be started " + \
14739                                 "if there are other builds running and the load average " + \
14740                                 "is at least LOAD (a floating-point number).",
14741
14742                         "action" : "store"
14743                 },
14744
14745                 "--with-bdeps": {
14746                         "help":"include unnecessary build time dependencies",
14747                         "type":"choice",
14748                         "choices":("y", "n")
14749                 },
14750                 "--reinstall": {
14751                         "help":"specify conditions to trigger package reinstallation",
14752                         "type":"choice",
14753                         "choices":["changed-use"]
14754                 }
14755         }
14756
14757         from optparse import OptionParser
14758         parser = OptionParser()
14759         if parser.has_option("--help"):
14760                 parser.remove_option("--help")
14761
14762         for action_opt in actions:
14763                 parser.add_option("--" + action_opt, action="store_true",
14764                         dest=action_opt.replace("-", "_"), default=False)
14765         for myopt in options:
14766                 parser.add_option(myopt, action="store_true",
14767                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14768         for shortopt, longopt in shortmapping.iteritems():
14769                 parser.add_option("-" + shortopt, action="store_true",
14770                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14771         for myalias, myopt in longopt_aliases.iteritems():
14772                 parser.add_option(myalias, action="store_true",
14773                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14774
14775         for myopt, kwargs in argument_options.iteritems():
14776                 parser.add_option(myopt,
14777                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14778
14779         tmpcmdline = insert_optional_args(tmpcmdline)
14780
14781         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14782
14783         if myoptions.jobs:
14784                 jobs = None
14785                 if myoptions.jobs == "True":
14786                         jobs = True
14787                 else:
14788                         try:
14789                                 jobs = int(myoptions.jobs)
14790                         except ValueError:
14791                                 jobs = -1
14792
14793                 if jobs is not True and \
14794                         jobs < 1:
14795                         jobs = None
14796                         if not silent:
14797                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14798                                         (myoptions.jobs,), noiselevel=-1)
14799
14800                 myoptions.jobs = jobs
14801
14802         if myoptions.load_average:
14803                 try:
14804                         load_average = float(myoptions.load_average)
14805                 except ValueError:
14806                         load_average = 0.0
14807
14808                 if load_average <= 0.0:
14809                         load_average = None
14810                         if not silent:
14811                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14812                                         (myoptions.load_average,), noiselevel=-1)
14813
14814                 myoptions.load_average = load_average
14815
14816         for myopt in options:
14817                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14818                 if v:
14819                         myopts[myopt] = True
14820
14821         for myopt in argument_options:
14822                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14823                 if v is not None:
14824                         myopts[myopt] = v
14825
14826         if myoptions.searchdesc:
14827                 myoptions.search = True
14828
14829         for action_opt in actions:
14830                 v = getattr(myoptions, action_opt.replace("-", "_"))
14831                 if v:
14832                         if myaction:
14833                                 multiple_actions(myaction, action_opt)
14834                                 sys.exit(1)
14835                         myaction = action_opt
14836
14837         myfiles += myargs
14838
14839         return myaction, myopts, myfiles
14840
14841 def validate_ebuild_environment(trees):
14842         for myroot in trees:
14843                 settings = trees[myroot]["vartree"].settings
14844                 settings.validate()
14845
14846 def clear_caches(trees):
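              """
              Drop cached metadata held by the ebuild, binary and installed
              package databases (and the global dircache) to free memory
              before merging begins.
              """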
14847         for d in trees.itervalues():
14848                 d["porttree"].dbapi.melt()
14849                 d["porttree"].dbapi._aux_cache.clear()
14850                 d["bintree"].dbapi._aux_cache.clear()
14851                 d["bintree"].dbapi._clear_cache()
14852                 d["vartree"].dbapi.linkmap._clear_cache()
14853         portage.dircache.clear()
14854         gc.collect()
14855
14856 def load_emerge_config(trees=None):
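              """
              Create (or refresh) the portage configuration, honoring the
              PORTAGE_CONFIGROOT and ROOT environment variables when set, and
              return a (settings, trees, mtimedb) tuple.
              """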
14857         kwargs = {}
14858         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14859                 v = os.environ.get(envvar, None)
14860                 if v and v.strip():
14861                         kwargs[k] = v
14862         trees = portage.create_trees(trees=trees, **kwargs)
14863
14864         for root, root_trees in trees.iteritems():
14865                 settings = root_trees["vartree"].settings
14866                 setconfig = load_default_config(settings, root_trees)
14867                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14868
14869         settings = trees["/"]["vartree"].settings
14870
14871         for myroot in trees:
14872                 if myroot != "/":
14873                         settings = trees[myroot]["vartree"].settings
14874                         break
14875
14876         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14877         mtimedb = portage.MtimeDB(mtimedbfile)
14878         
14879         return settings, trees, mtimedb
14880
14881 def adjust_config(myopts, settings):
14882         """Make emerge specific adjustments to the config."""
14883
14884         # To enhance usability, make some vars case insensitive by forcing them to
14885         # lower case.
14886         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14887                 if myvar in settings:
14888                         settings[myvar] = settings[myvar].lower()
14889                         settings.backup_changes(myvar)
14890         del myvar
14891
14892         # Kill noauto as it will break merges otherwise.
14893         if "noauto" in settings.features:
14894                 while "noauto" in settings.features:
14895                         settings.features.remove("noauto")
14896                 settings["FEATURES"] = " ".join(settings.features)
14897                 settings.backup_changes("FEATURES")
14898
14899         CLEAN_DELAY = 5
14900         try:
14901                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14902         except ValueError, e:
14903                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14904                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14905                         settings["CLEAN_DELAY"], noiselevel=-1)
14906         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14907         settings.backup_changes("CLEAN_DELAY")
14908
14909         EMERGE_WARNING_DELAY = 10
14910         try:
14911                 EMERGE_WARNING_DELAY = int(settings.get(
14912                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14913         except ValueError, e:
14914                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14915                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14916                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14917         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14918         settings.backup_changes("EMERGE_WARNING_DELAY")
14919
14920         if "--quiet" in myopts:
14921                 settings["PORTAGE_QUIET"]="1"
14922                 settings.backup_changes("PORTAGE_QUIET")
14923
14924         if "--verbose" in myopts:
14925                 settings["PORTAGE_VERBOSE"] = "1"
14926                 settings.backup_changes("PORTAGE_VERBOSE")
14927
14928         # Set so that configs will be merged regardless of remembered status
14929         if ("--noconfmem" in myopts):
14930                 settings["NOCONFMEM"]="1"
14931                 settings.backup_changes("NOCONFMEM")
14932
14933         # Set various debug markers... They should be merged somehow.
14934         PORTAGE_DEBUG = 0
14935         try:
14936                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14937                 if PORTAGE_DEBUG not in (0, 1):
14938                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14939                                 PORTAGE_DEBUG, noiselevel=-1)
14940                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14941                                 noiselevel=-1)
14942                         PORTAGE_DEBUG = 0
14943         except ValueError, e:
14944                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14945                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14946                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14947                 del e
14948         if "--debug" in myopts:
14949                 PORTAGE_DEBUG = 1
14950         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14951         settings.backup_changes("PORTAGE_DEBUG")
14952
14953         if settings.get("NOCOLOR") not in ("yes","true"):
14954                 portage.output.havecolor = 1
14955
14956         # The explicit --color < y | n > option overrides the NOCOLOR
14957         # environment variable and stdout auto-detection.
14958         if "--color" in myopts:
14959                 if "y" == myopts["--color"]:
14960                         portage.output.havecolor = 1
14961                         settings["NOCOLOR"] = "false"
14962                 else:
14963                         portage.output.havecolor = 0
14964                         settings["NOCOLOR"] = "true"
14965                 settings.backup_changes("NOCOLOR")
14966         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14967                 portage.output.havecolor = 0
14968                 settings["NOCOLOR"] = "true"
14969                 settings.backup_changes("NOCOLOR")
14970
14971 def apply_priorities(settings):
14972         ionice(settings)
14973         nice(settings)
14974
14975 def nice(settings):
14976         try:
14977                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14978         except (OSError, ValueError), e:
14979                 out = portage.output.EOutput()
14980                 out.eerror("Failed to change nice value to '%s'" % \
14981                         settings["PORTAGE_NICENESS"])
14982                 out.eerror("%s\n" % str(e))
14983
14984 def ionice(settings):
14985
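              # PORTAGE_IONICE_COMMAND is typically set in make.conf and has ${PID}
              # substituted with emerge's process id; an illustrative setting:
              #   PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"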
14986         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14987         if ionice_cmd:
14988                 ionice_cmd = shlex.split(ionice_cmd)
14989         if not ionice_cmd:
14990                 return
14991
14992         from portage.util import varexpand
14993         variables = {"PID" : str(os.getpid())}
14994         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14995
14996         try:
14997                 rval = portage.process.spawn(cmd, env=os.environ)
14998         except portage.exception.CommandNotFound:
14999                 # The OS kernel probably doesn't support ionice,
15000                 # so return silently.
15001                 return
15002
15003         if rval != os.EX_OK:
15004                 out = portage.output.EOutput()
15005                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15006                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15007
15008 def display_missing_pkg_set(root_config, set_name):
15009
15010         msg = []
15011         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15012                 "The following sets exist:") % \
15013                 colorize("INFORM", set_name))
15014         msg.append("")
15015
15016         for s in sorted(root_config.sets):
15017                 msg.append("    %s" % s)
15018         msg.append("")
15019
15020         writemsg_level("".join("%s\n" % l for l in msg),
15021                 level=logging.ERROR, noiselevel=-1)
15022
15023 def expand_set_arguments(myfiles, myaction, root_config):
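              """
              Expand set arguments (@world, @system, ...) in myfiles: apply any
              set options given in braces, evaluate the /@, -@ and +@ set
              operators, and replace set names with their member atoms where
              appropriate. Returns a (newargs, retval) tuple.
              """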
15024         retval = os.EX_OK
15025         setconfig = root_config.setconfig
15026
15027         sets = setconfig.getSets()
15028
15029         # In order to know exactly which atoms/sets should be added to the
15030         # world file, the depgraph performs set expansion later. It will get
15031         # confused about where the atoms came from if it's not allowed to
15032         # expand them itself.
15033         do_not_expand = (None, )
15034         newargs = []
15035         for a in myfiles:
15036                 if a in ("system", "world"):
15037                         newargs.append(SETPREFIX+a)
15038                 else:
15039                         newargs.append(a)
15040         myfiles = newargs
15041         del newargs
15042         newargs = []
15043
15044         # separators for set arguments
15045         ARG_START = "{"
15046         ARG_END = "}"
15047
15048         # WARNING: all operators must be of equal length
15049         IS_OPERATOR = "/@"
15050         DIFF_OPERATOR = "-@"
15051         UNION_OPERATOR = "+@"
15052         
15053         for i in range(0, len(myfiles)):
15054                 if myfiles[i].startswith(SETPREFIX):
15055                         start = 0
15056                         end = 0
15057                         x = myfiles[i][len(SETPREFIX):]
15058                         newset = ""
15059                         while x:
15060                                 start = x.find(ARG_START)
15061                                 end = x.find(ARG_END)
15062                                 if start > 0 and start < end:
15063                                         namepart = x[:start]
15064                                         argpart = x[start+1:end]
15065                                 
15066                                         # TODO: implement proper quoting
15067                                         args = argpart.split(",")
15068                                         options = {}
15069                                         for a in args:
15070                                                 if "=" in a:
15071                                                         k, v  = a.split("=", 1)
15072                                                         options[k] = v
15073                                                 else:
15074                                                         options[a] = "True"
15075                                         setconfig.update(namepart, options)
15076                                         newset += (x[:start-len(namepart)]+namepart)
15077                                         x = x[end+len(ARG_END):]
15078                                 else:
15079                                         newset += x
15080                                         x = ""
15081                         myfiles[i] = SETPREFIX+newset
15082                                 
15083         sets = setconfig.getSets()
15084
15085         # Display errors that occurred while loading the SetConfig instance.
15086         for e in setconfig.errors:
15087                 print colorize("BAD", "Error during set creation: %s" % e)
15088         
15089         # emerge relies on the existence of sets named "world" and "system".
15090         required_sets = ("world", "system")
15091         missing_sets = []
15092
15093         for s in required_sets:
15094                 if s not in sets:
15095                         missing_sets.append(s)
15096         if missing_sets:
15097                 if len(missing_sets) > 2:
15098                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15099                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15100                 elif len(missing_sets) == 2:
15101                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15102                 else:
15103                         missing_sets_str = '"%s"' % missing_sets[-1]
15104                 msg = ["emerge: incomplete set configuration, " + \
15105                         "missing set(s): %s" % missing_sets_str]
15106                 if sets:
15107                         msg.append("        sets defined: %s" % ", ".join(sets))
15108                 msg.append("        This usually means that '%s'" % \
15109                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15110                 msg.append("        is missing or corrupt.")
15111                 for line in msg:
15112                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15113                 return (None, 1)
15114         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15115
15116         for a in myfiles:
15117                 if a.startswith(SETPREFIX):
15118                         # Support simple set operations (intersection, difference and union)
15119                         # on the command line. Expressions are evaluated strictly left-to-right.
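                              # Illustrative example (hypothetical "@extra" set):
                              #   "@world-@system+@extra" evaluates as ((world - system) + extra).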
15120                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15121                                 expression = a[len(SETPREFIX):]
15122                                 expr_sets = []
15123                                 expr_ops = []
15124                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15125                                         is_pos = expression.rfind(IS_OPERATOR)
15126                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15127                                         union_pos = expression.rfind(UNION_OPERATOR)
15128                                         op_pos = max(is_pos, diff_pos, union_pos)
15129                                         s1 = expression[:op_pos]
15130                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15131                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15132                                         if s2 not in sets:
15133                                                 display_missing_pkg_set(root_config, s2)
15134                                                 return (None, 1)
15135                                         expr_sets.insert(0, s2)
15136                                         expr_ops.insert(0, op)
15137                                         expression = s1
15138                                 if expression not in sets:
15139                                         display_missing_pkg_set(root_config, expression)
15140                                         return (None, 1)
15141                                 expr_sets.insert(0, expression)
15142                                 result = set(setconfig.getSetAtoms(expression))
15143                                 for i in range(0, len(expr_ops)):
15144                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15145                                         if expr_ops[i] == IS_OPERATOR:
15146                                                 result.intersection_update(s2)
15147                                         elif expr_ops[i] == DIFF_OPERATOR:
15148                                                 result.difference_update(s2)
15149                                         elif expr_ops[i] == UNION_OPERATOR:
15150                                                 result.update(s2)
15151                                         else:
15152                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15153                                 newargs.extend(result)
15154                         else:
15155                                 s = a[len(SETPREFIX):]
15156                                 if s not in sets:
15157                                         display_missing_pkg_set(root_config, s)
15158                                         return (None, 1)
15159                                 setconfig.active.append(s)
15160                                 try:
15161                                         set_atoms = setconfig.getSetAtoms(s)
15162                                 except portage.exception.PackageSetNotFound, e:
15163                                         writemsg_level(("emerge: the given set '%s' " + \
15164                                                 "contains a non-existent set named '%s'.\n") % \
15165                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15166                                         return (None, 1)
15167                                 if myaction in unmerge_actions and \
15168                                                 not sets[s].supportsOperation("unmerge"):
15169                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15170                                                 "not support unmerge operations\n")
15171                                         retval = 1
15172                                 elif not set_atoms:
15173                                         print "emerge: '%s' is an empty set" % s
15174                                 elif myaction not in do_not_expand:
15175                                         newargs.extend(set_atoms)
15176                                 else:
15177                                         newargs.append(SETPREFIX+s)
15178                                 for e in sets[s].errors:
15179                                         print e
15180                 else:
15181                         newargs.append(a)
15182         return (newargs, retval)
15183
15184 def repo_name_check(trees):
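              """
              Warn about configured repositories that are missing a
              profiles/repo_name entry. Returns True if any were found.
              """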
15185         missing_repo_names = set()
15186         for root, root_trees in trees.iteritems():
15187                 if "porttree" in root_trees:
15188                         portdb = root_trees["porttree"].dbapi
15189                         missing_repo_names.update(portdb.porttrees)
15190                         repos = portdb.getRepositories()
15191                         for r in repos:
15192                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15193                         if portdb.porttree_root in missing_repo_names and \
15194                                 not os.path.exists(os.path.join(
15195                                 portdb.porttree_root, "profiles")):
15196                                 # This is normal if $PORTDIR happens to be empty,
15197                                 # so don't warn about it.
15198                                 missing_repo_names.remove(portdb.porttree_root)
15199
15200         if missing_repo_names:
15201                 msg = []
15202                 msg.append("WARNING: One or more repositories " + \
15203                         "have missing repo_name entries:")
15204                 msg.append("")
15205                 for p in missing_repo_names:
15206                         msg.append("\t%s/profiles/repo_name" % (p,))
15207                 msg.append("")
15208                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15209                         "should be a plain text file containing a unique " + \
15210                         "name for the repository on the first line.", 70))
15211                 writemsg_level("".join("%s\n" % l for l in msg),
15212                         level=logging.WARNING, noiselevel=-1)
15213
15214         return bool(missing_repo_names)
15215
15216 def config_protect_check(trees):
15217         for root, root_trees in trees.iteritems():
15218                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15219                         msg = "!!! CONFIG_PROTECT is empty"
15220                         if root != "/":
15221                                 msg += " for '%s'" % root
15222                         writemsg_level(msg + "\n", level=logging.WARNING, noiselevel=-1)
15223
15224 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15225
15226         if "--quiet" in myopts:
15227                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15228                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15229                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15230                         print "    " + colorize("INFORM", cp)
15231                 return
15232
15233         s = search(root_config, spinner, "--searchdesc" in myopts,
15234                 "--quiet" not in myopts, "--usepkg" in myopts,
15235                 "--usepkgonly" in myopts)
15236         null_cp = portage.dep_getkey(insert_category_into_atom(
15237                 arg, "null"))
15238         cat, atom_pn = portage.catsplit(null_cp)
15239         s.searchkey = atom_pn
15240         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15241                 s.addCP(cp)
15242         s.output()
15243         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15244         print "!!! one of the above fully-qualified ebuild names instead.\n"
15245
15246 def profile_check(trees, myaction, myopts):
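              """
              Return os.EX_OK if a valid profile is configured (or the
              requested action does not require one); otherwise print an
              error message and return 1.
              """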
15247         if myaction in ("info", "sync"):
15248                 return os.EX_OK
15249         elif "--version" in myopts or "--help" in myopts:
15250                 return os.EX_OK
15251         for root, root_trees in trees.iteritems():
15252                 if root_trees["root_config"].settings.profiles:
15253                         continue
15254                 # generate some profile related warning messages
15255                 validate_ebuild_environment(trees)
15256                 msg = "If you have just changed your profile configuration, you " + \
15257                         "should revert back to the previous configuration. Due to " + \
15258                         "your current profile being invalid, allowed actions are " + \
15259                         "limited to --help, --info, --sync, and --version."
15260                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15261                         level=logging.ERROR, noiselevel=-1)
15262                 return 1
15263         return os.EX_OK
15264
15265 def emerge_main():
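              """
              Entry point for the emerge command line: perform an early
              option-parsing pass, load the portage configuration, apply
              EMERGE_DEFAULT_OPTS, and then handle the requested action.
              Returns the exit code.
              """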
15266         global portage  # NFC why this is necessary now - genone
15267         portage._disable_legacy_globals()
15268         # Disable color until we're sure that it should be enabled (after
15269         # EMERGE_DEFAULT_OPTS has been parsed).
15270         portage.output.havecolor = 0
15271         # This first pass is just for options that need to be known as early as
15272         # possible, such as --config-root.  They will be parsed again later,
15273         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15274         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15275         # the value of --config-root).
15276         if "--debug" in myopts:
15277                 os.environ["PORTAGE_DEBUG"] = "1"
15278         if "--config-root" in myopts:
15279                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15280
15281         # Portage needs to ensure a sane umask for the files it creates.
15282         os.umask(022)
15283         settings, trees, mtimedb = load_emerge_config()
15284         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15285         rval = profile_check(trees, myaction, myopts)
15286         if rval != os.EX_OK:
15287                 return rval
15288
15289         if portage._global_updates(trees, mtimedb["updates"]):
15290                 mtimedb.commit()
15291                 # Reload the whole config from scratch.
15292                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15293                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15294
15295         xterm_titles = "notitles" not in settings.features
15296
15297         tmpcmdline = []
15298         if "--ignore-default-opts" not in myopts:
15299                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15300         tmpcmdline.extend(sys.argv[1:])
15301         myaction, myopts, myfiles = parse_opts(tmpcmdline)
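        # Illustrative example (values are hypothetical): with
        # EMERGE_DEFAULT_OPTS="--ask --verbose" in make.conf and a command
        # line of `emerge --config-root=/alt world`, the first parse_opts()
        # pass above only needs --config-root so that the right configuration
        # is read; this second pass then sees the defaults followed by the
        # real arguments, roughly: --ask --verbose --config-root=/alt world.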
15302
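        # Note: --digest is implemented by appending "digest" to FEATURES for
        # this run (roughly equivalent to setting FEATURES="digest" in
        # make.conf), so digests are regenerated as ebuilds are processed.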
15303         if "--digest" in myopts:
15304                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15305                 # Reload the whole config from scratch so that the portdbapi internal
15306                 # config is updated with new FEATURES.
15307                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15308                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15309
15310         for myroot in trees:
15311                 mysettings = trees[myroot]["vartree"].settings
15312                 mysettings.unlock()
15313                 adjust_config(myopts, mysettings)
15314                 if '--pretend' not in myopts and myaction in \
15315                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15316                         mysettings["PORTAGE_COUNTER_HASH"] = \
15317                                 trees[myroot]["vartree"].dbapi._counter_hash()
15318                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15319                 mysettings.lock()
15320                 del myroot, mysettings
15321
15322         apply_priorities(settings)
15323
15324         spinner = stdout_spinner()
15325         if "candy" in settings.features:
15326                 spinner.update = spinner.update_scroll
15327
15328         if "--quiet" not in myopts:
15329                 portage.deprecated_profile_check(settings=settings)
15330                 repo_name_check(trees)
15331                 config_protect_check(trees)
15332
15333         eclasses_overridden = {}
15334         for mytrees in trees.itervalues():
15335                 mydb = mytrees["porttree"].dbapi
15336                 # Freeze the portdbapi for performance (memoize all xmatch results).
15337                 mydb.freeze()
15338                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15339         del mytrees, mydb
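        # After freeze(), repeated dbapi queries (e.g. successive xmatch()
        # calls for the same atom) return memoized results instead of
        # re-scanning the tree, which assumes the portage tree does not
        # change for the remainder of this emerge invocation.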
15340
15341         if eclasses_overridden and \
15342                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15343                 prefix = bad(" * ")
15344                 if len(eclasses_overridden) == 1:
15345                         writemsg(prefix + "Overlay eclass overrides " + \
15346                                 "eclass from PORTDIR:\n", noiselevel=-1)
15347                 else:
15348                         writemsg(prefix + "Overlay eclasses override " + \
15349                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15350                 writemsg(prefix + "\n", noiselevel=-1)
15351                 for eclass_name in sorted(eclasses_overridden):
15352                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15353                                 (eclasses_overridden[eclass_name], eclass_name),
15354                                 noiselevel=-1)
15355                 writemsg(prefix + "\n", noiselevel=-1)
15356                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15357                         "because it will trigger invalidation of cached ebuild metadata " + \
15358                         "that is distributed with the portage tree. If you must " + \
15359                         "override eclasses from PORTDIR then you are advised to add " + \
15360                         "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15361                         "`emerge --regen` after each time that you run `emerge --sync`. " + \
15362                         "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15363                         "you would like to disable this warning."
15364                 # textwrap is already imported at module level, so use it directly.
15365                 for line in textwrap.wrap(msg, 72):
15366                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15367
15368         if "moo" in myfiles:
15369                 print """
15370
15371   Larry loves Gentoo (""" + platform.system() + """)
15372
15373  _______________________
15374 < Have you mooed today? >
15375  -----------------------
15376         \   ^__^
15377          \  (oo)\_______
15378             (__)\       )\/\ 
15379                 ||----w |
15380                 ||     ||
15381
15382 """
15383
15384         for x in myfiles:
15385                 ext = os.path.splitext(x)[1]
15386                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15387                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15388                         break
15389
15390         root_config = trees[settings["ROOT"]]["root_config"]
15391         if myaction == "list-sets":
15392                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15393                 sys.stdout.flush()
15394                 return os.EX_OK
15395
15396         # only expand sets for actions taking package arguments
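        # (Arguments that name package sets -- e.g. "world" with the set
        # prefix SETPREFIX, normally "@world" -- are replaced by the set's
        # contents here; a set can expand to nothing, hence the
        # "no targets left" check below.)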
15397         oldargs = myfiles[:]
15398         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15399                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15400                 if retval != os.EX_OK:
15401                         return retval
15402
15403                 # Empty sets need special handling; otherwise emerge would fall back to
15404                 # the help message it shows for empty argument lists.
15405                 if oldargs and not myfiles:
15406                         print "emerge: no targets left after set expansion"
15407                         return 0
15408
15409         if ("--tree" in myopts) and ("--columns" in myopts):
15410                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15411                 return 1
15412
15413         if ("--quiet" in myopts):
15414                 spinner.update = spinner.update_quiet
15415                 portage.util.noiselimit = -1
15416
15417         # Always create packages if FEATURES=buildpkg
15418         # Imply --buildpkg if --buildpkgonly
15419         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15420                 if "--buildpkg" not in myopts:
15421                         myopts["--buildpkg"] = True
15422
15423         # Always try to fetch binary packages if FEATURES=getbinpkg
15424         if ("getbinpkg" in settings.features):
15425                 myopts["--getbinpkg"] = True
15426
15427         if "--buildpkgonly" in myopts:
15428                 # --buildpkgonly will not merge anything, so
15429                 # it cancels all binary package options.
15430                 for opt in ("--getbinpkg", "--getbinpkgonly",
15431                         "--usepkg", "--usepkgonly"):
15432                         myopts.pop(opt, None)
15433
15434         if "--fetch-all-uri" in myopts:
15435                 myopts["--fetchonly"] = True
15436
15437         if "--skipfirst" in myopts and "--resume" not in myopts:
15438                 myopts["--resume"] = True
15439
15440         if ("--getbinpkgonly" in myopts) and ("--usepkgonly" not in myopts):
15441                 myopts["--usepkgonly"] = True
15442
15443         if ("--getbinpkgonly" in myopts) and ("--getbinpkg" not in myopts):
15444                 myopts["--getbinpkg"] = True
15445
15446         if ("--getbinpkg" in myopts) and ("--usepkg" not in myopts):
15447                 myopts["--usepkg"] = True
15448
15449         # Also allow -K to apply --usepkg/-k
15450         if ("--usepkgonly" in myopts) and ("--usepkg" not in myopts):
15451                 myopts["--usepkg"] = True
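        # Net effect of the implications above, for example: --getbinpkgonly
        # alone ends up enabling --usepkgonly, --getbinpkg and --usepkg, while
        # plain --usepkgonly (-K) also enables --usepkg.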
15452
15453         # Allow -p to remove --ask
15454         if ("--pretend" in myopts) and ("--ask" in myopts):
15455                 print ">>> --pretend disables --ask... removing --ask from options."
15456                 del myopts["--ask"]
15457
15458         # forbid --ask when not in a terminal
15459         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15460         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15461                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15462                         noiselevel=-1)
15463                 return 1
15464
15465         if settings.get("PORTAGE_DEBUG", "") == "1":
15466                 spinner.update = spinner.update_quiet
15467                 portage.debug = 1
15468                 if "python-trace" in settings.features:
15469                         import portage.debug
15470                         portage.debug.set_trace(True)
15471
15472         if "--quiet" not in myopts:
15473                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15474                         spinner.update = spinner.update_basic
15475
15476         if myaction == 'version':
15477                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15478                         settings.profile_path, settings["CHOST"],
15479                         trees[settings["ROOT"]]["vartree"].dbapi)
15480                 return 0
15481         elif "--help" in myopts:
15482                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15483                 return 0
15484
15485         if "--debug" in myopts:
15486                 print "myaction", myaction
15487                 print "myopts", myopts
15488
15489         if not myaction and not myfiles and "--resume" not in myopts:
15490                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15491                 return 1
15492
15493         pretend = "--pretend" in myopts
15494         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15495         buildpkgonly = "--buildpkgonly" in myopts
15496
15497         # Check that the current user has sufficient privileges for the requested action.
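        # portage.secpass is the privilege level detected at import time:
        # roughly 0 = regular user, 1 = member of the portage group, 2 = root.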
15498         if portage.secpass < 2:
15499                 # We've already allowed "--version" and "--help" above.
15500                 if "--pretend" not in myopts and myaction not in ("search","info"):
15501                         need_superuser = not \
15502                                 (fetchonly or \
15503                                 (buildpkgonly and secpass >= 1) or \
15504                                 myaction in ("metadata", "regen") or \
15505                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15506                         if portage.secpass < 1 or \
15507                                 need_superuser:
15508                                 if need_superuser:
15509                                         access_desc = "superuser"
15510                                 else:
15511                                         access_desc = "portage group"
15512                                 # Always show portage_group_warning() when only portage group
15513                                 # access is required but the user is not in the portage group.
15514                                 from portage.data import portage_group_warning
15515                                 if "--ask" in myopts:
15516                                         myopts["--pretend"] = True
15517                                         del myopts["--ask"]
15518                                         print ("%s access is required... " + \
15519                                                 "adding --pretend to options.\n") % access_desc
15520                                         if portage.secpass < 1 and not need_superuser:
15521                                                 portage_group_warning()
15522                                 else:
15523                                         sys.stderr.write(("emerge: %s access is " + \
15524                                                 "required.\n\n") % access_desc)
15525                                         if portage.secpass < 1 and not need_superuser:
15526                                                 portage_group_warning()
15527                                         return 1
15528
15529         disable_emergelog = False
15530         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15531                 if x in myopts:
15532                         disable_emergelog = True
15533                         break
15534         if myaction in ("search", "info"):
15535                 disable_emergelog = True
15536         if disable_emergelog:
15537                 """ Disable emergelog for everything except build or unmerge
15538                 operations.  This helps minimize parallel emerge.log entries that can
15539                 confuse log parsers.  We especially want it disabled during
15540                 parallel-fetch, which uses --resume --fetchonly."""
15541                 global emergelog
15542                 def emergelog(*pargs, **kargs):
15543                         pass
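                # Rebinding the module-level name means every later
                # emergelog() call in this process becomes a no-op, e.g. the
                # "Started emerge on:" entry below when --fetchonly is in
                # effect.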
15544
15545         if "--pretend" not in myopts:
15546                 emergelog(xterm_titles, "Started emerge on: "+\
15547                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15548                 myelogstr = ""
15549                 if myopts:
15550                         myelogstr = " ".join(myopts)
15551                 if myaction:
15552                         myelogstr += " " + myaction
15553                 if myfiles:
15554                         myelogstr += " " + " ".join(oldargs)
15555                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15556         del oldargs
15557
15558         def emergeexitsig(signum, frame):
15559                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15560                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15561                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15562                 sys.exit(100+signum)
15563         signal.signal(signal.SIGINT, emergeexitsig)
15564         signal.signal(signal.SIGTERM, emergeexitsig)
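        # The handler exits with 100 + the signal number, e.g. 102 for SIGINT
        # (signal 2) and 115 for SIGTERM (signal 15), so a signal-triggered
        # exit can be told apart from ordinary failure codes.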
15565
15566         def emergeexit():
15567                 """This gets our final log message in before we quit."""
15568                 if "--pretend" not in myopts:
15569                         emergelog(xterm_titles, " *** terminating.")
15570                 if "notitles" not in settings.features:
15571                         xtermTitleReset()
15572         portage.atexit_register(emergeexit)
15573
15574         if myaction in ("config", "metadata", "regen", "sync"):
15575                 if "--pretend" in myopts:
15576                         sys.stderr.write(("emerge: The '%s' action does " + \
15577                                 "not support '--pretend'.\n") % myaction)
15578                         return 1
15579
15580         if "sync" == myaction:
15581                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15582         elif "metadata" == myaction:
15583                 action_metadata(settings, portdb, myopts)
15584         elif myaction=="regen":
15585                 validate_ebuild_environment(trees)
15586                 return action_regen(settings, portdb, myopts.get("--jobs"),
15587                         myopts.get("--load-average"))
15588         # CONFIG action
15589         elif "config"==myaction:
15590                 validate_ebuild_environment(trees)
15591                 action_config(settings, trees, myopts, myfiles)
15592
15593         # SEARCH action
15594         elif "search"==myaction:
15595                 validate_ebuild_environment(trees)
15596                 action_search(trees[settings["ROOT"]]["root_config"],
15597                         myopts, myfiles, spinner)
15598         elif myaction in ("clean", "unmerge") or \
15599                 (myaction == "prune" and "--nodeps" in myopts):
15600                 validate_ebuild_environment(trees)
15601
15602                 # Ensure atoms are valid before calling unmerge().
15603                 # For backward compat, leading '=' is not required.
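                # For example, "category/package-1.0" on its own is not a
                # valid atom, but "=category/package-1.0" is, so the bare form
                # is accepted here as well.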
15604                 for x in myfiles:
15605                         if is_valid_package_atom(x) or \
15606                                 is_valid_package_atom("=" + x):
15607                                 continue
15608                         msg = []
15609                         msg.append("'%s' is not a valid package atom." % (x,))
15610                         msg.append("Please check ebuild(5) for full details.")
15611                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15612                                 level=logging.ERROR, noiselevel=-1)
15613                         return 1
15614
15615                 # When given a list of atoms, unmerge
15616                 # them in the order given.
15617                 ordered = myaction == "unmerge"
15618                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15619                         mtimedb["ldpath"], ordered=ordered):
15620                         if not (buildpkgonly or fetchonly or pretend):
15621                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15622
15623         elif myaction in ("depclean", "info", "prune"):
15624
15625                 # Ensure atoms are valid before passing them to the info/depclean/prune actions.
15626                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15627                 valid_atoms = []
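                # dep_expand() turns a bare name into a full category/package
                # atom using the installed-package db, e.g. "bash" would
                # typically expand to "app-shells/bash"; when more than one
                # category matches, AmbiguousPackageName is raised and
                # reported below.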
15628                 for x in myfiles:
15629                         if is_valid_package_atom(x):
15630                                 try:
15631                                         valid_atoms.append(
15632                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15633                                 except portage.exception.AmbiguousPackageName, e:
15634                                         msg = "The short ebuild name \"" + x + \
15635                                                 "\" is ambiguous.  Please specify " + \
15636                                                 "one of the following " + \
15637                                                 "fully-qualified ebuild names instead:"
15638                                         for line in textwrap.wrap(msg, 70):
15639                                                 writemsg_level("!!! %s\n" % (line,),
15640                                                         level=logging.ERROR, noiselevel=-1)
15641                                         for i in e[0]:
15642                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15643                                                         level=logging.ERROR, noiselevel=-1)
15644                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15645                                         return 1
15646                                 continue
15647                         msg = []
15648                         msg.append("'%s' is not a valid package atom." % (x,))
15649                         msg.append("Please check ebuild(5) for full details.")
15650                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15651                                 level=logging.ERROR, noiselevel=-1)
15652                         return 1
15653
15654                 if myaction == "info":
15655                         return action_info(settings, trees, myopts, valid_atoms)
15656
15657                 validate_ebuild_environment(trees)
15658                 action_depclean(settings, trees, mtimedb["ldpath"],
15659                         myopts, myaction, valid_atoms, spinner)
15660                 if not (buildpkgonly or fetchonly or pretend):
15661                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15662         # "update", "system", or just process files:
15663         else:
15664                 validate_ebuild_environment(trees)
15665                 if "--pretend" not in myopts:
15666                         display_news_notification(root_config, myopts)
15667                 retval = action_build(settings, trees, mtimedb,
15668                         myopts, myaction, myfiles, spinner)
15669                 root_config = trees[settings["ROOT"]]["root_config"]
15670                 post_emerge(root_config, myopts, mtimedb, retval)
15671
15672                 return retval