Make --version and action.
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for a response
147         which is checked against the responses and the first to match is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge", "version",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of if it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virual matches unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtuals
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
1158 class FakeVartree(portage.vartree):
1159         """This is implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do it's
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Intialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. This can raise an InvalidDependString
1383         exception if LICENSE is invalid.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if not pkg.installed:
1391                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if not pkg.installed:
1419                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440         if metadata is None:
1441                 mreasons = ["corruption"]
1442         else:
1443                 pkg = Package(type_name=pkg_type, root_config=root_config,
1444                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1445                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446         return metadata, mreasons
1447
1448 def show_masked_packages(masked_packages):
1449         shown_licenses = set()
1450         shown_comments = set()
1451         # Maybe there is both an ebuild and a binary. Only
1452         # show one of them to avoid redundant appearance.
1453         shown_cpvs = set()
1454         have_eapi_mask = False
1455         for (root_config, pkgsettings, cpv,
1456                 metadata, mreasons) in masked_packages:
1457                 if cpv in shown_cpvs:
1458                         continue
1459                 shown_cpvs.add(cpv)
1460                 comment, filename = None, None
1461                 if "package.mask" in mreasons:
1462                         comment, filename = \
1463                                 portage.getmaskingreason(
1464                                 cpv, metadata=metadata,
1465                                 settings=pkgsettings,
1466                                 portdb=root_config.trees["porttree"].dbapi,
1467                                 return_location=True)
1468                 missing_licenses = []
1469                 if metadata:
1470                         if not portage.eapi_is_supported(metadata["EAPI"]):
1471                                 have_eapi_mask = True
1472                         try:
1473                                 missing_licenses = \
1474                                         pkgsettings._getMissingLicenses(
1475                                                 cpv, metadata)
1476                         except portage.exception.InvalidDependString:
1477                                 # This will have already been reported
1478                                 # above via mreasons.
1479                                 pass
1480
1481                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482                 if comment and comment not in shown_comments:
1483                         print filename+":"
1484                         print comment
1485                         shown_comments.add(comment)
1486                 portdb = root_config.trees["porttree"].dbapi
1487                 for l in missing_licenses:
1488                         l_path = portdb.findLicensePath(l)
1489                         if l in shown_licenses:
1490                                 continue
1491                         msg = ("A copy of the '%s' license" + \
1492                         " is located at '%s'.") % (l, l_path)
1493                         print msg
1494                         print
1495                         shown_licenses.add(l)
1496         return have_eapi_mask
1497
1498 class Task(SlotObject):
1499         __slots__ = ("_hash_key", "_hash_value")
1500
1501         def _get_hash_key(self):
1502                 hash_key = getattr(self, "_hash_key", None)
1503                 if hash_key is None:
1504                         raise NotImplementedError(self)
1505                 return hash_key
1506
1507         def __eq__(self, other):
1508                 return self._get_hash_key() == other
1509
1510         def __ne__(self, other):
1511                 return self._get_hash_key() != other
1512
1513         def __hash__(self):
1514                 hash_value = getattr(self, "_hash_value", None)
1515                 if hash_value is None:
1516                         self._hash_value = hash(self._get_hash_key())
1517                 return self._hash_value
1518
1519         def __len__(self):
1520                 return len(self._get_hash_key())
1521
1522         def __getitem__(self, key):
1523                 return self._get_hash_key()[key]
1524
1525         def __iter__(self):
1526                 return iter(self._get_hash_key())
1527
1528         def __contains__(self, key):
1529                 return key in self._get_hash_key()
1530
1531         def __str__(self):
1532                 return str(self._get_hash_key())
1533
1534 class Blocker(Task):
1535
1536         __hash__ = Task.__hash__
1537         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1538
1539         def __init__(self, **kwargs):
1540                 Task.__init__(self, **kwargs)
1541                 self.cp = portage.dep_getkey(self.atom)
1542
1543         def _get_hash_key(self):
1544                 hash_key = getattr(self, "_hash_key", None)
1545                 if hash_key is None:
1546                         self._hash_key = \
1547                                 ("blocks", self.root, self.atom, self.eapi)
1548                 return self._hash_key
1549
1550 class Package(Task):
1551
1552         __hash__ = Task.__hash__
1553         __slots__ = ("built", "cpv", "depth",
1554                 "installed", "metadata", "onlydeps", "operation",
1555                 "root_config", "type_name",
1556                 "category", "counter", "cp", "cpv_split",
1557                 "inherited", "iuse", "mtime",
1558                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1559
1560         metadata_keys = [
1561                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562                 "INHERITED", "IUSE", "KEYWORDS",
1563                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565
1566         def __init__(self, **kwargs):
1567                 Task.__init__(self, **kwargs)
1568                 self.root = self.root_config.root
1569                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570                 self.cp = portage.cpv_getkey(self.cpv)
1571                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1572                 self.category, self.pf = portage.catsplit(self.cpv)
1573                 self.cpv_split = portage.catpkgsplit(self.cpv)
1574                 self.pv_split = self.cpv_split[1:]
1575
1576         class _use(object):
1577
1578                 __slots__ = ("__weakref__", "enabled")
1579
1580                 def __init__(self, use):
1581                         self.enabled = frozenset(use)
1582
1583         class _iuse(object):
1584
1585                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1586
1587                 def __init__(self, tokens, iuse_implicit):
1588                         self.tokens = tuple(tokens)
1589                         self.iuse_implicit = iuse_implicit
1590                         enabled = []
1591                         disabled = []
1592                         other = []
1593                         for x in tokens:
1594                                 prefix = x[:1]
1595                                 if prefix == "+":
1596                                         enabled.append(x[1:])
1597                                 elif prefix == "-":
1598                                         disabled.append(x[1:])
1599                                 else:
1600                                         other.append(x)
1601                         self.enabled = frozenset(enabled)
1602                         self.disabled = frozenset(disabled)
1603                         self.all = frozenset(chain(enabled, disabled, other))
1604
1605                 def __getattribute__(self, name):
1606                         if name == "regex":
1607                                 try:
1608                                         return object.__getattribute__(self, "regex")
1609                                 except AttributeError:
1610                                         all = object.__getattribute__(self, "all")
1611                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1612                                         # Escape anything except ".*" which is supposed
1613                                         # to pass through from _get_implicit_iuse()
1614                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1615                                         regex = "^(%s)$" % "|".join(regex)
1616                                         regex = regex.replace("\\.\\*", ".*")
1617                                         self.regex = re.compile(regex)
1618                         return object.__getattribute__(self, name)
1619
1620         def _get_hash_key(self):
1621                 hash_key = getattr(self, "_hash_key", None)
1622                 if hash_key is None:
1623                         if self.operation is None:
1624                                 self.operation = "merge"
1625                                 if self.onlydeps or self.installed:
1626                                         self.operation = "nomerge"
1627                         self._hash_key = \
1628                                 (self.type_name, self.root, self.cpv, self.operation)
1629                 return self._hash_key
1630
1631         def __lt__(self, other):
1632                 if other.cp != self.cp:
1633                         return False
1634                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1635                         return True
1636                 return False
1637
1638         def __le__(self, other):
1639                 if other.cp != self.cp:
1640                         return False
1641                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1642                         return True
1643                 return False
1644
1645         def __gt__(self, other):
1646                 if other.cp != self.cp:
1647                         return False
1648                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1649                         return True
1650                 return False
1651
1652         def __ge__(self, other):
1653                 if other.cp != self.cp:
1654                         return False
1655                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1656                         return True
1657                 return False
1658
1659 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1660         if not x.startswith("UNUSED_"))
1661 _all_metadata_keys.discard("CDEPEND")
1662 _all_metadata_keys.update(Package.metadata_keys)
1663
1664 from portage.cache.mappings import slot_dict_class
1665 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1666
1667 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1668         """
1669         Detect metadata updates and synchronize Package attributes.
1670         """
1671
1672         __slots__ = ("_pkg",)
1673         _wrapped_keys = frozenset(
1674                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1675
1676         def __init__(self, pkg, metadata):
1677                 _PackageMetadataWrapperBase.__init__(self)
1678                 self._pkg = pkg
1679                 self.update(metadata)
1680
1681         def __setitem__(self, k, v):
1682                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1683                 if k in self._wrapped_keys:
1684                         getattr(self, "_set_" + k.lower())(k, v)
1685
1686         def _set_inherited(self, k, v):
1687                 if isinstance(v, basestring):
1688                         v = frozenset(v.split())
1689                 self._pkg.inherited = v
1690
1691         def _set_iuse(self, k, v):
1692                 self._pkg.iuse = self._pkg._iuse(
1693                         v.split(), self._pkg.root_config.iuse_implicit)
1694
1695         def _set_slot(self, k, v):
1696                 self._pkg.slot = v
1697
1698         def _set_use(self, k, v):
1699                 self._pkg.use = self._pkg._use(v.split())
1700
1701         def _set_counter(self, k, v):
1702                 if isinstance(v, basestring):
1703                         try:
1704                                 v = long(v.strip())
1705                         except ValueError:
1706                                 v = 0
1707                 self._pkg.counter = v
1708
1709         def _set__mtime_(self, k, v):
1710                 if isinstance(v, basestring):
1711                         try:
1712                                 v = long(v.strip())
1713                         except ValueError:
1714                                 v = 0
1715                 self._pkg.mtime = v
1716
1717 class EbuildFetchonly(SlotObject):
1718
1719         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1720
1721         def execute(self):
1722                 settings = self.settings
1723                 pkg = self.pkg
1724                 portdb = pkg.root_config.trees["porttree"].dbapi
1725                 ebuild_path = portdb.findname(pkg.cpv)
1726                 settings.setcpv(pkg)
1727                 debug = settings.get("PORTAGE_DEBUG") == "1"
1728                 use_cache = 1 # always true
1729                 portage.doebuild_environment(ebuild_path, "fetch",
1730                         settings["ROOT"], settings, debug, use_cache, portdb)
1731                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1732
1733                 if restrict_fetch:
1734                         rval = self._execute_with_builddir()
1735                 else:
1736                         rval = portage.doebuild(ebuild_path, "fetch",
1737                                 settings["ROOT"], settings, debug=debug,
1738                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1739                                 mydbapi=portdb, tree="porttree")
1740
1741                         if rval != os.EX_OK:
1742                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1743                                 eerror(msg, phase="unpack", key=pkg.cpv)
1744
1745                 return rval
1746
1747         def _execute_with_builddir(self):
1748                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1749                 # ensuring sane $PWD (bug #239560) and storing elog
1750                 # messages. Use a private temp directory, in order
1751                 # to avoid locking the main one.
1752                 settings = self.settings
1753                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1754                 from tempfile import mkdtemp
1755                 try:
1756                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1757                 except OSError, e:
1758                         if e.errno != portage.exception.PermissionDenied.errno:
1759                                 raise
1760                         raise portage.exception.PermissionDenied(global_tmpdir)
1761                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1762                 settings.backup_changes("PORTAGE_TMPDIR")
1763                 try:
1764                         retval = self._execute()
1765                 finally:
1766                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1767                         settings.backup_changes("PORTAGE_TMPDIR")
1768                         shutil.rmtree(private_tmpdir)
1769                 return retval
1770
1771         def _execute(self):
1772                 settings = self.settings
1773                 pkg = self.pkg
1774                 root_config = pkg.root_config
1775                 portdb = root_config.trees["porttree"].dbapi
1776                 ebuild_path = portdb.findname(pkg.cpv)
1777                 debug = settings.get("PORTAGE_DEBUG") == "1"
1778                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1779
1780                 retval = portage.doebuild(ebuild_path, "fetch",
1781                         self.settings["ROOT"], self.settings, debug=debug,
1782                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1783                         mydbapi=portdb, tree="porttree")
1784
1785                 if retval != os.EX_OK:
1786                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1787                         eerror(msg, phase="unpack", key=pkg.cpv)
1788
1789                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1790                 return retval
1791
1792 class PollConstants(object):
1793
1794         """
1795         Provides POLL* constants that are equivalent to those from the
1796         select module, for use by PollSelectAdapter.
1797         """
1798
1799         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1800         v = 1
1801         for k in names:
1802                 locals()[k] = getattr(select, k, v)
1803                 v *= 2
1804         del k, v
1805
1806 class AsynchronousTask(SlotObject):
1807         """
1808         Subclasses override _wait() and _poll() so that calls
1809         to public methods can be wrapped for implementing
1810         hooks such as exit listener notification.
1811
1812         Sublasses should call self.wait() to notify exit listeners after
1813         the task is complete and self.returncode has been set.
1814         """
1815
1816         __slots__ = ("background", "cancelled", "returncode") + \
1817                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1818
1819         def start(self):
1820                 """
1821                 Start an asynchronous task and then return as soon as possible.
1822                 """
1823                 self._start_hook()
1824                 self._start()
1825
1826         def _start(self):
1827                 raise NotImplementedError(self)
1828
1829         def isAlive(self):
1830                 return self.returncode is None
1831
1832         def poll(self):
1833                 self._wait_hook()
1834                 return self._poll()
1835
1836         def _poll(self):
1837                 return self.returncode
1838
1839         def wait(self):
1840                 if self.returncode is None:
1841                         self._wait()
1842                 self._wait_hook()
1843                 return self.returncode
1844
1845         def _wait(self):
1846                 return self.returncode
1847
1848         def cancel(self):
1849                 self.cancelled = True
1850                 self.wait()
1851
1852         def addStartListener(self, f):
1853                 """
1854                 The function will be called with one argument, a reference to self.
1855                 """
1856                 if self._start_listeners is None:
1857                         self._start_listeners = []
1858                 self._start_listeners.append(f)
1859
1860         def removeStartListener(self, f):
1861                 if self._start_listeners is None:
1862                         return
1863                 self._start_listeners.remove(f)
1864
1865         def _start_hook(self):
1866                 if self._start_listeners is not None:
1867                         start_listeners = self._start_listeners
1868                         self._start_listeners = None
1869
1870                         for f in start_listeners:
1871                                 f(self)
1872
1873         def addExitListener(self, f):
1874                 """
1875                 The function will be called with one argument, a reference to self.
1876                 """
1877                 if self._exit_listeners is None:
1878                         self._exit_listeners = []
1879                 self._exit_listeners.append(f)
1880
1881         def removeExitListener(self, f):
1882                 if self._exit_listeners is None:
1883                         if self._exit_listener_stack is not None:
1884                                 self._exit_listener_stack.remove(f)
1885                         return
1886                 self._exit_listeners.remove(f)
1887
1888         def _wait_hook(self):
1889                 """
1890                 Call this method after the task completes, just before returning
1891                 the returncode from wait() or poll(). This hook is
1892                 used to trigger exit listeners when the returncode first
1893                 becomes available.
1894                 """
1895                 if self.returncode is not None and \
1896                         self._exit_listeners is not None:
1897
1898                         # This prevents recursion, in case one of the
1899                         # exit handlers triggers this method again by
1900                         # calling wait(). Use a stack that gives
1901                         # removeExitListener() an opportunity to consume
1902                         # listeners from the stack, before they can get
1903                         # called below. This is necessary because a call
1904                         # to one exit listener may result in a call to
1905                         # removeExitListener() for another listener on
1906                         # the stack. That listener needs to be removed
1907                         # from the stack since it would be inconsistent
1908                         # to call it after it has been been passed into
1909                         # removeExitListener().
1910                         self._exit_listener_stack = self._exit_listeners
1911                         self._exit_listeners = None
1912
1913                         self._exit_listener_stack.reverse()
1914                         while self._exit_listener_stack:
1915                                 self._exit_listener_stack.pop()(self)
1916
1917 class AbstractPollTask(AsynchronousTask):
1918
1919         __slots__ = ("scheduler",) + \
1920                 ("_registered",)
1921
1922         _bufsize = 4096
1923         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1924         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1925                 _exceptional_events
1926
1927         def _unregister(self):
1928                 raise NotImplementedError(self)
1929
1930         def _unregister_if_appropriate(self, event):
1931                 if self._registered:
1932                         if event & self._exceptional_events:
1933                                 self._unregister()
1934                                 self.cancel()
1935                         elif event & PollConstants.POLLHUP:
1936                                 self._unregister()
1937                                 self.wait()
1938
1939 class PipeReader(AbstractPollTask):
1940
1941         """
1942         Reads output from one or more files and saves it in memory,
1943         for retrieval via the getvalue() method. This is driven by
1944         the scheduler's poll() loop, so it runs entirely within the
1945         current process.
1946         """
1947
1948         __slots__ = ("input_files",) + \
1949                 ("_read_data", "_reg_ids")
1950
1951         def _start(self):
1952                 self._reg_ids = set()
1953                 self._read_data = []
1954                 for k, f in self.input_files.iteritems():
1955                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1956                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1957                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1958                                 self._registered_events, self._output_handler))
1959                 self._registered = True
1960
1961         def isAlive(self):
1962                 return self._registered
1963
1964         def cancel(self):
1965                 if self.returncode is None:
1966                         self.returncode = 1
1967                         self.cancelled = True
1968                 self.wait()
1969
1970         def _wait(self):
1971                 if self.returncode is not None:
1972                         return self.returncode
1973
1974                 if self._registered:
1975                         self.scheduler.schedule(self._reg_ids)
1976                         self._unregister()
1977
1978                 self.returncode = os.EX_OK
1979                 return self.returncode
1980
1981         def getvalue(self):
1982                 """Retrieve the entire contents"""
1983                 if sys.hexversion >= 0x3000000:
1984                         return bytes().join(self._read_data)
1985                 return "".join(self._read_data)
1986
1987         def close(self):
1988                 """Free the memory buffer."""
1989                 self._read_data = None
1990
1991         def _output_handler(self, fd, event):
1992
1993                 if event & PollConstants.POLLIN:
1994
1995                         for f in self.input_files.itervalues():
1996                                 if fd == f.fileno():
1997                                         break
1998
1999                         buf = array.array('B')
2000                         try:
2001                                 buf.fromfile(f, self._bufsize)
2002                         except EOFError:
2003                                 pass
2004
2005                         if buf:
2006                                 self._read_data.append(buf.tostring())
2007                         else:
2008                                 self._unregister()
2009                                 self.wait()
2010
2011                 self._unregister_if_appropriate(event)
2012                 return self._registered
2013
2014         def _unregister(self):
2015                 """
2016                 Unregister from the scheduler and close open files.
2017                 """
2018
2019                 self._registered = False
2020
2021                 if self._reg_ids is not None:
2022                         for reg_id in self._reg_ids:
2023                                 self.scheduler.unregister(reg_id)
2024                         self._reg_ids = None
2025
2026                 if self.input_files is not None:
2027                         for f in self.input_files.itervalues():
2028                                 f.close()
2029                         self.input_files = None
2030
2031 class CompositeTask(AsynchronousTask):
2032
2033         __slots__ = ("scheduler",) + ("_current_task",)
2034
2035         def isAlive(self):
2036                 return self._current_task is not None
2037
2038         def cancel(self):
2039                 self.cancelled = True
2040                 if self._current_task is not None:
2041                         self._current_task.cancel()
2042
2043         def _poll(self):
2044                 """
2045                 This does a loop calling self._current_task.poll()
2046                 repeatedly as long as the value of self._current_task
2047                 keeps changing. It calls poll() a maximum of one time
2048                 for a given self._current_task instance. This is useful
2049                 since calling poll() on a task can trigger advance to
2050                 the next task could eventually lead to the returncode
2051                 being set in cases when polling only a single task would
2052                 not have the same effect.
2053                 """
2054
2055                 prev = None
2056                 while True:
2057                         task = self._current_task
2058                         if task is None or task is prev:
2059                                 # don't poll the same task more than once
2060                                 break
2061                         task.poll()
2062                         prev = task
2063
2064                 return self.returncode
2065
2066         def _wait(self):
2067
2068                 prev = None
2069                 while True:
2070                         task = self._current_task
2071                         if task is None:
2072                                 # don't wait for the same task more than once
2073                                 break
2074                         if task is prev:
2075                                 # Before the task.wait() method returned, an exit
2076                                 # listener should have set self._current_task to either
2077                                 # a different task or None. Something is wrong.
2078                                 raise AssertionError("self._current_task has not " + \
2079                                         "changed since calling wait", self, task)
2080                         task.wait()
2081                         prev = task
2082
2083                 return self.returncode
2084
2085         def _assert_current(self, task):
2086                 """
2087                 Raises an AssertionError if the given task is not the
2088                 same one as self._current_task. This can be useful
2089                 for detecting bugs.
2090                 """
2091                 if task is not self._current_task:
2092                         raise AssertionError("Unrecognized task: %s" % (task,))
2093
2094         def _default_exit(self, task):
2095                 """
2096                 Calls _assert_current() on the given task and then sets the
2097                 composite returncode attribute if task.returncode != os.EX_OK.
2098                 If the task failed then self._current_task will be set to None.
2099                 Subclasses can use this as a generic task exit callback.
2100
2101                 @rtype: int
2102                 @returns: The task.returncode attribute.
2103                 """
2104                 self._assert_current(task)
2105                 if task.returncode != os.EX_OK:
2106                         self.returncode = task.returncode
2107                         self._current_task = None
2108                 return task.returncode
2109
2110         def _final_exit(self, task):
2111                 """
2112                 Assumes that task is the final task of this composite task.
2113                 Calls _default_exit() and sets self.returncode to the task's
2114                 returncode and sets self._current_task to None.
2115                 """
2116                 self._default_exit(task)
2117                 self._current_task = None
2118                 self.returncode = task.returncode
2119                 return self.returncode
2120
2121         def _default_final_exit(self, task):
2122                 """
2123                 This calls _final_exit() and then wait().
2124
2125                 Subclasses can use this as a generic final task exit callback.
2126
2127                 """
2128                 self._final_exit(task)
2129                 return self.wait()
2130
2131         def _start_task(self, task, exit_handler):
2132                 """
2133                 Register exit handler for the given task, set it
2134                 as self._current_task, and call task.start().
2135
2136                 Subclasses can use this as a generic way to start
2137                 a task.
2138
2139                 """
2140                 task.addExitListener(exit_handler)
2141                 self._current_task = task
2142                 task.start()
2143
2144 class TaskSequence(CompositeTask):
2145         """
2146         A collection of tasks that executes sequentially. Each task
2147         must have a addExitListener() method that can be used as
2148         a means to trigger movement from one task to the next.
2149         """
2150
2151         __slots__ = ("_task_queue",)
2152
2153         def __init__(self, **kwargs):
2154                 AsynchronousTask.__init__(self, **kwargs)
2155                 self._task_queue = deque()
2156
2157         def add(self, task):
2158                 self._task_queue.append(task)
2159
2160         def _start(self):
2161                 self._start_next_task()
2162
2163         def cancel(self):
2164                 self._task_queue.clear()
2165                 CompositeTask.cancel(self)
2166
2167         def _start_next_task(self):
2168                 self._start_task(self._task_queue.popleft(),
2169                         self._task_exit_handler)
2170
2171         def _task_exit_handler(self, task):
2172                 if self._default_exit(task) != os.EX_OK:
2173                         self.wait()
2174                 elif self._task_queue:
2175                         self._start_next_task()
2176                 else:
2177                         self._final_exit(task)
2178                         self.wait()
2179
2180 class SubProcess(AbstractPollTask):
2181
2182         __slots__ = ("pid",) + \
2183                 ("_files", "_reg_id")
2184
2185         # A file descriptor is required for the scheduler to monitor changes from
2186         # inside a poll() loop. When logging is not enabled, create a pipe just to
2187         # serve this purpose alone.
2188         _dummy_pipe_fd = 9
2189
2190         def _poll(self):
2191                 if self.returncode is not None:
2192                         return self.returncode
2193                 if self.pid is None:
2194                         return self.returncode
2195                 if self._registered:
2196                         return self.returncode
2197
2198                 try:
2199                         retval = os.waitpid(self.pid, os.WNOHANG)
2200                 except OSError, e:
2201                         if e.errno != errno.ECHILD:
2202                                 raise
2203                         del e
2204                         retval = (self.pid, 1)
2205
2206                 if retval == (0, 0):
2207                         return None
2208                 self._set_returncode(retval)
2209                 return self.returncode
2210
2211         def cancel(self):
2212                 if self.isAlive():
2213                         try:
2214                                 os.kill(self.pid, signal.SIGTERM)
2215                         except OSError, e:
2216                                 if e.errno != errno.ESRCH:
2217                                         raise
2218                                 del e
2219
2220                 self.cancelled = True
2221                 if self.pid is not None:
2222                         self.wait()
2223                 return self.returncode
2224
2225         def isAlive(self):
2226                 return self.pid is not None and \
2227                         self.returncode is None
2228
2229         def _wait(self):
2230
2231                 if self.returncode is not None:
2232                         return self.returncode
2233
2234                 if self._registered:
2235                         self.scheduler.schedule(self._reg_id)
2236                         self._unregister()
2237                         if self.returncode is not None:
2238                                 return self.returncode
2239
2240                 try:
2241                         wait_retval = os.waitpid(self.pid, 0)
2242                 except OSError, e:
2243                         if e.errno != errno.ECHILD:
2244                                 raise
2245                         del e
2246                         self._set_returncode((self.pid, 1))
2247                 else:
2248                         self._set_returncode(wait_retval)
2249
2250                 return self.returncode
2251
2252         def _unregister(self):
2253                 """
2254                 Unregister from the scheduler and close open files.
2255                 """
2256
2257                 self._registered = False
2258
2259                 if self._reg_id is not None:
2260                         self.scheduler.unregister(self._reg_id)
2261                         self._reg_id = None
2262
2263                 if self._files is not None:
2264                         for f in self._files.itervalues():
2265                                 f.close()
2266                         self._files = None
2267
2268         def _set_returncode(self, wait_retval):
2269
2270                 retval = wait_retval[1]
2271
2272                 if retval != os.EX_OK:
2273                         if retval & 0xff:
2274                                 retval = (retval & 0xff) << 8
2275                         else:
2276                                 retval = retval >> 8
2277
2278                 self.returncode = retval
2279
2280 class SpawnProcess(SubProcess):
2281
2282         """
2283         Constructor keyword args are passed into portage.process.spawn().
2284         The required "args" keyword argument will be passed as the first
2285         spawn() argument.
2286         """
2287
2288         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2289                 "uid", "gid", "groups", "umask", "logfile",
2290                 "path_lookup", "pre_exec")
2291
2292         __slots__ = ("args",) + \
2293                 _spawn_kwarg_names
2294
2295         _file_names = ("log", "process", "stdout")
2296         _files_dict = slot_dict_class(_file_names, prefix="")
2297
2298         def _start(self):
2299
2300                 if self.cancelled:
2301                         return
2302
2303                 if self.fd_pipes is None:
2304                         self.fd_pipes = {}
2305                 fd_pipes = self.fd_pipes
2306                 fd_pipes.setdefault(0, sys.stdin.fileno())
2307                 fd_pipes.setdefault(1, sys.stdout.fileno())
2308                 fd_pipes.setdefault(2, sys.stderr.fileno())
2309
2310                 # flush any pending output
2311                 for fd in fd_pipes.itervalues():
2312                         if fd == sys.stdout.fileno():
2313                                 sys.stdout.flush()
2314                         if fd == sys.stderr.fileno():
2315                                 sys.stderr.flush()
2316
2317                 logfile = self.logfile
2318                 self._files = self._files_dict()
2319                 files = self._files
2320
2321                 master_fd, slave_fd = self._pipe(fd_pipes)
2322                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2323                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2324
2325                 null_input = None
2326                 fd_pipes_orig = fd_pipes.copy()
2327                 if self.background:
2328                         # TODO: Use job control functions like tcsetpgrp() to control
2329                         # access to stdin. Until then, use /dev/null so that any
2330                         # attempts to read from stdin will immediately return EOF
2331                         # instead of blocking indefinitely.
2332                         null_input = open('/dev/null', 'rb')
2333                         fd_pipes[0] = null_input.fileno()
2334                 else:
2335                         fd_pipes[0] = fd_pipes_orig[0]
2336
2337                 files.process = os.fdopen(master_fd, 'rb')
2338                 if logfile is not None:
2339
2340                         fd_pipes[1] = slave_fd
2341                         fd_pipes[2] = slave_fd
2342
2343                         files.log = open(logfile, mode='ab')
2344                         portage.util.apply_secpass_permissions(logfile,
2345                                 uid=portage.portage_uid, gid=portage.portage_gid,
2346                                 mode=0660)
2347
2348                         if not self.background:
2349                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2350
2351                         output_handler = self._output_handler
2352
2353                 else:
2354
2355                         # Create a dummy pipe so the scheduler can monitor
2356                         # the process from inside a poll() loop.
2357                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2358                         if self.background:
2359                                 fd_pipes[1] = slave_fd
2360                                 fd_pipes[2] = slave_fd
2361                         output_handler = self._dummy_handler
2362
2363                 kwargs = {}
2364                 for k in self._spawn_kwarg_names:
2365                         v = getattr(self, k)
2366                         if v is not None:
2367                                 kwargs[k] = v
2368
2369                 kwargs["fd_pipes"] = fd_pipes
2370                 kwargs["returnpid"] = True
2371                 kwargs.pop("logfile", None)
2372
2373                 self._reg_id = self.scheduler.register(files.process.fileno(),
2374                         self._registered_events, output_handler)
2375                 self._registered = True
2376
2377                 retval = self._spawn(self.args, **kwargs)
2378
2379                 os.close(slave_fd)
2380                 if null_input is not None:
2381                         null_input.close()
2382
2383                 if isinstance(retval, int):
2384                         # spawn failed
2385                         self._unregister()
2386                         self.returncode = retval
2387                         self.wait()
2388                         return
2389
2390                 self.pid = retval[0]
2391                 portage.process.spawned_pids.remove(self.pid)
2392
2393         def _pipe(self, fd_pipes):
2394                 """
2395                 @type fd_pipes: dict
2396                 @param fd_pipes: pipes from which to copy terminal size if desired.
2397                 """
2398                 return os.pipe()
2399
2400         def _spawn(self, args, **kwargs):
2401                 return portage.process.spawn(args, **kwargs)
2402
2403         def _output_handler(self, fd, event):
2404
2405                 if event & PollConstants.POLLIN:
2406
2407                         files = self._files
2408                         buf = array.array('B')
2409                         try:
2410                                 buf.fromfile(files.process, self._bufsize)
2411                         except EOFError:
2412                                 pass
2413
2414                         if buf:
2415                                 if not self.background:
2416                                         buf.tofile(files.stdout)
2417                                         files.stdout.flush()
2418                                 buf.tofile(files.log)
2419                                 files.log.flush()
2420                         else:
2421                                 self._unregister()
2422                                 self.wait()
2423
2424                 self._unregister_if_appropriate(event)
2425                 return self._registered
2426
2427         def _dummy_handler(self, fd, event):
2428                 """
2429                 This method is mainly interested in detecting EOF, since
2430                 the only purpose of the pipe is to allow the scheduler to
2431                 monitor the process from inside a poll() loop.
2432                 """
2433
2434                 if event & PollConstants.POLLIN:
2435
2436                         buf = array.array('B')
2437                         try:
2438                                 buf.fromfile(self._files.process, self._bufsize)
2439                         except EOFError:
2440                                 pass
2441
2442                         if buf:
2443                                 pass
2444                         else:
2445                                 self._unregister()
2446                                 self.wait()
2447
2448                 self._unregister_if_appropriate(event)
2449                 return self._registered
2450
2451 class MiscFunctionsProcess(SpawnProcess):
2452         """
2453         Spawns misc-functions.sh with an existing ebuild environment.
2454         """
2455
2456         __slots__ = ("commands", "phase", "pkg", "settings")
2457
2458         def _start(self):
2459                 settings = self.settings
2460                 settings.pop("EBUILD_PHASE", None)
2461                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2462                 misc_sh_binary = os.path.join(portage_bin_path,
2463                         os.path.basename(portage.const.MISC_SH_BINARY))
2464
2465                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2466                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2467
2468                 portage._doebuild_exit_status_unlink(
2469                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2470
2471                 SpawnProcess._start(self)
2472
2473         def _spawn(self, args, **kwargs):
2474                 settings = self.settings
2475                 debug = settings.get("PORTAGE_DEBUG") == "1"
2476                 return portage.spawn(" ".join(args), settings,
2477                         debug=debug, **kwargs)
2478
2479         def _set_returncode(self, wait_retval):
2480                 SpawnProcess._set_returncode(self, wait_retval)
2481                 self.returncode = portage._doebuild_exit_status_check_and_log(
2482                         self.settings, self.phase, self.returncode)
2483
2484 class EbuildFetcher(SpawnProcess):
2485
2486         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2487                 ("_build_dir",)
2488
2489         def _start(self):
2490
2491                 root_config = self.pkg.root_config
2492                 portdb = root_config.trees["porttree"].dbapi
2493                 ebuild_path = portdb.findname(self.pkg.cpv)
2494                 settings = self.config_pool.allocate()
2495                 settings.setcpv(self.pkg)
2496
2497                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2498                 # should not be touched since otherwise it could interfere with
2499                 # another instance of the same cpv concurrently being built for a
2500                 # different $ROOT (currently, builds only cooperate with prefetchers
2501                 # that are spawned for the same $ROOT).
2502                 if not self.prefetch:
2503                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2504                         self._build_dir.lock()
2505                         self._build_dir.clean()
2506                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2507                         if self.logfile is None:
2508                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2509
2510                 phase = "fetch"
2511                 if self.fetchall:
2512                         phase = "fetchall"
2513
2514                 # If any incremental variables have been overridden
2515                 # via the environment, those values need to be passed
2516                 # along here so that they are correctly considered by
2517                 # the config instance in the subproccess.
2518                 fetch_env = os.environ.copy()
2519
2520                 nocolor = settings.get("NOCOLOR")
2521                 if nocolor is not None:
2522                         fetch_env["NOCOLOR"] = nocolor
2523
2524                 fetch_env["PORTAGE_NICENESS"] = "0"
2525                 if self.prefetch:
2526                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2527
2528                 ebuild_binary = os.path.join(
2529                         settings["PORTAGE_BIN_PATH"], "ebuild")
2530
2531                 fetch_args = [ebuild_binary, ebuild_path, phase]
2532                 debug = settings.get("PORTAGE_DEBUG") == "1"
2533                 if debug:
2534                         fetch_args.append("--debug")
2535
2536                 self.args = fetch_args
2537                 self.env = fetch_env
2538                 SpawnProcess._start(self)
2539
2540         def _pipe(self, fd_pipes):
2541                 """When appropriate, use a pty so that fetcher progress bars,
2542                 like wget has, will work properly."""
2543                 if self.background or not sys.stdout.isatty():
2544                         # When the output only goes to a log file,
2545                         # there's no point in creating a pty.
2546                         return os.pipe()
2547                 stdout_pipe = fd_pipes.get(1)
2548                 got_pty, master_fd, slave_fd = \
2549                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2550                 return (master_fd, slave_fd)
2551
2552         def _set_returncode(self, wait_retval):
2553                 SpawnProcess._set_returncode(self, wait_retval)
2554                 # Collect elog messages that might have been
2555                 # created by the pkg_nofetch phase.
2556                 if self._build_dir is not None:
2557                         # Skip elog messages for prefetch, in order to avoid duplicates.
2558                         if not self.prefetch and self.returncode != os.EX_OK:
2559                                 elog_out = None
2560                                 if self.logfile is not None:
2561                                         if self.background:
2562                                                 elog_out = open(self.logfile, 'a')
2563                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2564                                 if self.logfile is not None:
2565                                         msg += ", Log file:"
2566                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2567                                 if self.logfile is not None:
2568                                         eerror(" '%s'" % (self.logfile,),
2569                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2570                                 if elog_out is not None:
2571                                         elog_out.close()
2572                         if not self.prefetch:
2573                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2574                         features = self._build_dir.settings.features
2575                         if self.returncode == os.EX_OK:
2576                                 self._build_dir.clean()
2577                         self._build_dir.unlock()
2578                         self.config_pool.deallocate(self._build_dir.settings)
2579                         self._build_dir = None
2580
2581 class EbuildBuildDir(SlotObject):
2582
2583         __slots__ = ("dir_path", "pkg", "settings",
2584                 "locked", "_catdir", "_lock_obj")
2585
2586         def __init__(self, **kwargs):
2587                 SlotObject.__init__(self, **kwargs)
2588                 self.locked = False
2589
2590         def lock(self):
2591                 """
2592                 This raises an AlreadyLocked exception if lock() is called
2593                 while a lock is already held. In order to avoid this, call
2594                 unlock() or check whether the "locked" attribute is True
2595                 or False before calling lock().
2596                 """
2597                 if self._lock_obj is not None:
2598                         raise self.AlreadyLocked((self._lock_obj,))
2599
2600                 dir_path = self.dir_path
2601                 if dir_path is None:
2602                         root_config = self.pkg.root_config
2603                         portdb = root_config.trees["porttree"].dbapi
2604                         ebuild_path = portdb.findname(self.pkg.cpv)
2605                         settings = self.settings
2606                         settings.setcpv(self.pkg)
2607                         debug = settings.get("PORTAGE_DEBUG") == "1"
2608                         use_cache = 1 # always true
2609                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2610                                 self.settings, debug, use_cache, portdb)
2611                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2612
2613                 catdir = os.path.dirname(dir_path)
2614                 self._catdir = catdir
2615
2616                 portage.util.ensure_dirs(os.path.dirname(catdir),
2617                         gid=portage.portage_gid,
2618                         mode=070, mask=0)
2619                 catdir_lock = None
2620                 try:
2621                         catdir_lock = portage.locks.lockdir(catdir)
2622                         portage.util.ensure_dirs(catdir,
2623                                 gid=portage.portage_gid,
2624                                 mode=070, mask=0)
2625                         self._lock_obj = portage.locks.lockdir(dir_path)
2626                 finally:
2627                         self.locked = self._lock_obj is not None
2628                         if catdir_lock is not None:
2629                                 portage.locks.unlockdir(catdir_lock)
2630
2631         def clean(self):
2632                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2633                 by keepwork or keeptemp in FEATURES."""
2634                 settings = self.settings
2635                 features = settings.features
2636                 if not ("keepwork" in features or "keeptemp" in features):
2637                         try:
2638                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2639                         except EnvironmentError, e:
2640                                 if e.errno != errno.ENOENT:
2641                                         raise
2642                                 del e
2643
2644         def unlock(self):
2645                 if self._lock_obj is None:
2646                         return
2647
2648                 portage.locks.unlockdir(self._lock_obj)
2649                 self._lock_obj = None
2650                 self.locked = False
2651
2652                 catdir = self._catdir
2653                 catdir_lock = None
2654                 try:
2655                         catdir_lock = portage.locks.lockdir(catdir)
2656                 finally:
2657                         if catdir_lock:
2658                                 try:
2659                                         os.rmdir(catdir)
2660                                 except OSError, e:
2661                                         if e.errno not in (errno.ENOENT,
2662                                                 errno.ENOTEMPTY, errno.EEXIST):
2663                                                 raise
2664                                         del e
2665                                 portage.locks.unlockdir(catdir_lock)
2666
2667         class AlreadyLocked(portage.exception.PortageException):
2668                 pass
2669
2670 class EbuildBuild(CompositeTask):
2671
2672         __slots__ = ("args_set", "config_pool", "find_blockers",
2673                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2674                 "prefetcher", "settings", "world_atom") + \
2675                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2676
2677         def _start(self):
2678
2679                 logger = self.logger
2680                 opts = self.opts
2681                 pkg = self.pkg
2682                 settings = self.settings
2683                 world_atom = self.world_atom
2684                 root_config = pkg.root_config
2685                 tree = "porttree"
2686                 self._tree = tree
2687                 portdb = root_config.trees[tree].dbapi
2688                 settings.setcpv(pkg)
2689                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2690                 ebuild_path = portdb.findname(self.pkg.cpv)
2691                 self._ebuild_path = ebuild_path
2692
2693                 prefetcher = self.prefetcher
2694                 if prefetcher is None:
2695                         pass
2696                 elif not prefetcher.isAlive():
2697                         prefetcher.cancel()
2698                 elif prefetcher.poll() is None:
2699
2700                         waiting_msg = "Fetching files " + \
2701                                 "in the background. " + \
2702                                 "To view fetch progress, run `tail -f " + \
2703                                 "/var/log/emerge-fetch.log` in another " + \
2704                                 "terminal."
2705                         msg_prefix = colorize("GOOD", " * ")
2706                         from textwrap import wrap
2707                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2708                                 for line in wrap(waiting_msg, 65))
2709                         if not self.background:
2710                                 writemsg(waiting_msg, noiselevel=-1)
2711
2712                         self._current_task = prefetcher
2713                         prefetcher.addExitListener(self._prefetch_exit)
2714                         return
2715
2716                 self._prefetch_exit(prefetcher)
2717
2718         def _prefetch_exit(self, prefetcher):
2719
2720                 opts = self.opts
2721                 pkg = self.pkg
2722                 settings = self.settings
2723
2724                 if opts.fetchonly:
2725                                 fetcher = EbuildFetchonly(
2726                                         fetch_all=opts.fetch_all_uri,
2727                                         pkg=pkg, pretend=opts.pretend,
2728                                         settings=settings)
2729                                 retval = fetcher.execute()
2730                                 self.returncode = retval
2731                                 self.wait()
2732                                 return
2733
2734                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2735                         fetchall=opts.fetch_all_uri,
2736                         fetchonly=opts.fetchonly,
2737                         background=self.background,
2738                         pkg=pkg, scheduler=self.scheduler)
2739
2740                 self._start_task(fetcher, self._fetch_exit)
2741
2742         def _fetch_exit(self, fetcher):
2743                 opts = self.opts
2744                 pkg = self.pkg
2745
2746                 fetch_failed = False
2747                 if opts.fetchonly:
2748                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2749                 else:
2750                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2751
2752                 if fetch_failed and fetcher.logfile is not None and \
2753                         os.path.exists(fetcher.logfile):
2754                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2755
2756                 if not fetch_failed and fetcher.logfile is not None:
2757                         # Fetch was successful, so remove the fetch log.
2758                         try:
2759                                 os.unlink(fetcher.logfile)
2760                         except OSError:
2761                                 pass
2762
2763                 if fetch_failed or opts.fetchonly:
2764                         self.wait()
2765                         return
2766
2767                 logger = self.logger
2768                 opts = self.opts
2769                 pkg_count = self.pkg_count
2770                 scheduler = self.scheduler
2771                 settings = self.settings
2772                 features = settings.features
2773                 ebuild_path = self._ebuild_path
2774                 system_set = pkg.root_config.sets["system"]
2775
2776                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2777                 self._build_dir.lock()
2778
2779                 # Cleaning is triggered before the setup
2780                 # phase, in portage.doebuild().
2781                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2782                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2783                 short_msg = "emerge: (%s of %s) %s Clean" % \
2784                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2785                 logger.log(msg, short_msg=short_msg)
2786
2787                 #buildsyspkg: Check if we need to _force_ binary package creation
2788                 self._issyspkg = "buildsyspkg" in features and \
2789                                 system_set.findAtomForPackage(pkg) and \
2790                                 not opts.buildpkg
2791
2792                 if opts.buildpkg or self._issyspkg:
2793
2794                         self._buildpkg = True
2795
2796                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2797                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2798                         short_msg = "emerge: (%s of %s) %s Compile" % \
2799                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2800                         logger.log(msg, short_msg=short_msg)
2801
2802                 else:
2803                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2804                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2805                         short_msg = "emerge: (%s of %s) %s Compile" % \
2806                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2807                         logger.log(msg, short_msg=short_msg)
2808
2809                 build = EbuildExecuter(background=self.background, pkg=pkg,
2810                         scheduler=scheduler, settings=settings)
2811                 self._start_task(build, self._build_exit)
2812
2813         def _unlock_builddir(self):
2814                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2815                 self._build_dir.unlock()
2816
2817         def _build_exit(self, build):
2818                 if self._default_exit(build) != os.EX_OK:
2819                         self._unlock_builddir()
2820                         self.wait()
2821                         return
2822
2823                 opts = self.opts
2824                 buildpkg = self._buildpkg
2825
2826                 if not buildpkg:
2827                         self._final_exit(build)
2828                         self.wait()
2829                         return
2830
2831                 if self._issyspkg:
2832                         msg = ">>> This is a system package, " + \
2833                                 "let's pack a rescue tarball.\n"
2834
2835                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2836                         if log_path is not None:
2837                                 log_file = open(log_path, 'a')
2838                                 try:
2839                                         log_file.write(msg)
2840                                 finally:
2841                                         log_file.close()
2842
2843                         if not self.background:
2844                                 portage.writemsg_stdout(msg, noiselevel=-1)
2845
2846                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2847                         scheduler=self.scheduler, settings=self.settings)
2848
2849                 self._start_task(packager, self._buildpkg_exit)
2850
2851         def _buildpkg_exit(self, packager):
2852                 """
2853                 Released build dir lock when there is a failure or
2854                 when in buildpkgonly mode. Otherwise, the lock will
2855                 be released when merge() is called.
2856                 """
2857
2858                 if self._default_exit(packager) != os.EX_OK:
2859                         self._unlock_builddir()
2860                         self.wait()
2861                         return
2862
2863                 if self.opts.buildpkgonly:
2864                         # Need to call "clean" phase for buildpkgonly mode
2865                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2866                         phase = "clean"
2867                         clean_phase = EbuildPhase(background=self.background,
2868                                 pkg=self.pkg, phase=phase,
2869                                 scheduler=self.scheduler, settings=self.settings,
2870                                 tree=self._tree)
2871                         self._start_task(clean_phase, self._clean_exit)
2872                         return
2873
2874                 # Continue holding the builddir lock until
2875                 # after the package has been installed.
2876                 self._current_task = None
2877                 self.returncode = packager.returncode
2878                 self.wait()
2879
2880         def _clean_exit(self, clean_phase):
2881                 if self._final_exit(clean_phase) != os.EX_OK or \
2882                         self.opts.buildpkgonly:
2883                         self._unlock_builddir()
2884                 self.wait()
2885
2886         def install(self):
2887                 """
2888                 Install the package and then clean up and release locks.
2889                 Only call this after the build has completed successfully
2890                 and neither fetchonly nor buildpkgonly mode are enabled.
2891                 """
2892
2893                 find_blockers = self.find_blockers
2894                 ldpath_mtimes = self.ldpath_mtimes
2895                 logger = self.logger
2896                 pkg = self.pkg
2897                 pkg_count = self.pkg_count
2898                 settings = self.settings
2899                 world_atom = self.world_atom
2900                 ebuild_path = self._ebuild_path
2901                 tree = self._tree
2902
2903                 merge = EbuildMerge(find_blockers=self.find_blockers,
2904                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2905                         pkg_count=pkg_count, pkg_path=ebuild_path,
2906                         scheduler=self.scheduler,
2907                         settings=settings, tree=tree, world_atom=world_atom)
2908
2909                 msg = " === (%s of %s) Merging (%s::%s)" % \
2910                         (pkg_count.curval, pkg_count.maxval,
2911                         pkg.cpv, ebuild_path)
2912                 short_msg = "emerge: (%s of %s) %s Merge" % \
2913                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2914                 logger.log(msg, short_msg=short_msg)
2915
2916                 try:
2917                         rval = merge.execute()
2918                 finally:
2919                         self._unlock_builddir()
2920
2921                 return rval
2922
2923 class EbuildExecuter(CompositeTask):
2924
2925         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2926
2927         _phases = ("prepare", "configure", "compile", "test", "install")
2928
2929         _live_eclasses = frozenset([
2930                 "bzr",
2931                 "cvs",
2932                 "darcs",
2933                 "git",
2934                 "mercurial",
2935                 "subversion"
2936         ])
2937
2938         def _start(self):
2939                 self._tree = "porttree"
2940                 pkg = self.pkg
2941                 phase = "clean"
2942                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2943                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2944                 self._start_task(clean_phase, self._clean_phase_exit)
2945
2946         def _clean_phase_exit(self, clean_phase):
2947
2948                 if self._default_exit(clean_phase) != os.EX_OK:
2949                         self.wait()
2950                         return
2951
2952                 pkg = self.pkg
2953                 scheduler = self.scheduler
2954                 settings = self.settings
2955                 cleanup = 1
2956
2957                 # This initializes PORTAGE_LOG_FILE.
2958                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2959
2960                 setup_phase = EbuildPhase(background=self.background,
2961                         pkg=pkg, phase="setup", scheduler=scheduler,
2962                         settings=settings, tree=self._tree)
2963
2964                 setup_phase.addExitListener(self._setup_exit)
2965                 self._current_task = setup_phase
2966                 self.scheduler.scheduleSetup(setup_phase)
2967
2968         def _setup_exit(self, setup_phase):
2969
2970                 if self._default_exit(setup_phase) != os.EX_OK:
2971                         self.wait()
2972                         return
2973
2974                 unpack_phase = EbuildPhase(background=self.background,
2975                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2976                         settings=self.settings, tree=self._tree)
2977
2978                 if self._live_eclasses.intersection(self.pkg.inherited):
2979                         # Serialize $DISTDIR access for live ebuilds since
2980                         # otherwise they can interfere with eachother.
2981
2982                         unpack_phase.addExitListener(self._unpack_exit)
2983                         self._current_task = unpack_phase
2984                         self.scheduler.scheduleUnpack(unpack_phase)
2985
2986                 else:
2987                         self._start_task(unpack_phase, self._unpack_exit)
2988
2989         def _unpack_exit(self, unpack_phase):
2990
2991                 if self._default_exit(unpack_phase) != os.EX_OK:
2992                         self.wait()
2993                         return
2994
2995                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2996
2997                 pkg = self.pkg
2998                 phases = self._phases
2999                 eapi = pkg.metadata["EAPI"]
3000                 if eapi in ("0", "1"):
3001                         # skip src_prepare and src_configure
3002                         phases = phases[2:]
3003
3004                 for phase in phases:
3005                         ebuild_phases.add(EbuildPhase(background=self.background,
3006                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3007                                 settings=self.settings, tree=self._tree))
3008
3009                 self._start_task(ebuild_phases, self._default_final_exit)
3010
3011 class EbuildMetadataPhase(SubProcess):
3012
3013         """
3014         Asynchronous interface for the ebuild "depend" phase which is
3015         used to extract metadata from the ebuild.
3016         """
3017
3018         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3019                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3020                 ("_raw_metadata",)
3021
3022         _file_names = ("ebuild",)
3023         _files_dict = slot_dict_class(_file_names, prefix="")
3024         _metadata_fd = 9
3025
3026         def _start(self):
3027                 settings = self.settings
3028                 settings.reset()
3029                 ebuild_path = self.ebuild_path
3030                 debug = settings.get("PORTAGE_DEBUG") == "1"
3031                 master_fd = None
3032                 slave_fd = None
3033                 fd_pipes = None
3034                 if self.fd_pipes is not None:
3035                         fd_pipes = self.fd_pipes.copy()
3036                 else:
3037                         fd_pipes = {}
3038
3039                 fd_pipes.setdefault(0, sys.stdin.fileno())
3040                 fd_pipes.setdefault(1, sys.stdout.fileno())
3041                 fd_pipes.setdefault(2, sys.stderr.fileno())
3042
3043                 # flush any pending output
3044                 for fd in fd_pipes.itervalues():
3045                         if fd == sys.stdout.fileno():
3046                                 sys.stdout.flush()
3047                         if fd == sys.stderr.fileno():
3048                                 sys.stderr.flush()
3049
3050                 fd_pipes_orig = fd_pipes.copy()
3051                 self._files = self._files_dict()
3052                 files = self._files
3053
3054                 master_fd, slave_fd = os.pipe()
3055                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3056                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3057
3058                 fd_pipes[self._metadata_fd] = slave_fd
3059
3060                 self._raw_metadata = []
3061                 files.ebuild = os.fdopen(master_fd, 'r')
3062                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3063                         self._registered_events, self._output_handler)
3064                 self._registered = True
3065
3066                 retval = portage.doebuild(ebuild_path, "depend",
3067                         settings["ROOT"], settings, debug,
3068                         mydbapi=self.portdb, tree="porttree",
3069                         fd_pipes=fd_pipes, returnpid=True)
3070
3071                 os.close(slave_fd)
3072
3073                 if isinstance(retval, int):
3074                         # doebuild failed before spawning
3075                         self._unregister()
3076                         self.returncode = retval
3077                         self.wait()
3078                         return
3079
3080                 self.pid = retval[0]
3081                 portage.process.spawned_pids.remove(self.pid)
3082
3083         def _output_handler(self, fd, event):
3084
3085                 if event & PollConstants.POLLIN:
3086                         self._raw_metadata.append(self._files.ebuild.read())
3087                         if not self._raw_metadata[-1]:
3088                                 self._unregister()
3089                                 self.wait()
3090
3091                 self._unregister_if_appropriate(event)
3092                 return self._registered
3093
3094         def _set_returncode(self, wait_retval):
3095                 SubProcess._set_returncode(self, wait_retval)
3096                 if self.returncode == os.EX_OK:
3097                         metadata_lines = "".join(self._raw_metadata).splitlines()
3098                         if len(portage.auxdbkeys) != len(metadata_lines):
3099                                 # Don't trust bash's returncode if the
3100                                 # number of lines is incorrect.
3101                                 self.returncode = 1
3102                         else:
3103                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3104                                 self.metadata_callback(self.cpv, self.ebuild_path,
3105                                         self.repo_path, metadata, self.ebuild_mtime)
3106
3107 class EbuildProcess(SpawnProcess):
3108
3109         __slots__ = ("phase", "pkg", "settings", "tree")
3110
3111         def _start(self):
3112                 # Don't open the log file during the clean phase since the
3113                 # open file can result in an nfs lock on $T/build.log which
3114                 # prevents the clean phase from removing $T.
3115                 if self.phase not in ("clean", "cleanrm"):
3116                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3117                 SpawnProcess._start(self)
3118
3119         def _pipe(self, fd_pipes):
3120                 stdout_pipe = fd_pipes.get(1)
3121                 got_pty, master_fd, slave_fd = \
3122                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3123                 return (master_fd, slave_fd)
3124
3125         def _spawn(self, args, **kwargs):
3126
3127                 root_config = self.pkg.root_config
3128                 tree = self.tree
3129                 mydbapi = root_config.trees[tree].dbapi
3130                 settings = self.settings
3131                 ebuild_path = settings["EBUILD"]
3132                 debug = settings.get("PORTAGE_DEBUG") == "1"
3133
3134                 rval = portage.doebuild(ebuild_path, self.phase,
3135                         root_config.root, settings, debug,
3136                         mydbapi=mydbapi, tree=tree, **kwargs)
3137
3138                 return rval
3139
3140         def _set_returncode(self, wait_retval):
3141                 SpawnProcess._set_returncode(self, wait_retval)
3142
3143                 if self.phase not in ("clean", "cleanrm"):
3144                         self.returncode = portage._doebuild_exit_status_check_and_log(
3145                                 self.settings, self.phase, self.returncode)
3146
3147                 if self.phase == "test" and self.returncode != os.EX_OK and \
3148                         "test-fail-continue" in self.settings.features:
3149                         self.returncode = os.EX_OK
3150
3151                 portage._post_phase_userpriv_perms(self.settings)
3152
3153 class EbuildPhase(CompositeTask):
3154
3155         __slots__ = ("background", "pkg", "phase",
3156                 "scheduler", "settings", "tree")
3157
3158         _post_phase_cmds = portage._post_phase_cmds
3159
3160         def _start(self):
3161
3162                 ebuild_process = EbuildProcess(background=self.background,
3163                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3164                         settings=self.settings, tree=self.tree)
3165
3166                 self._start_task(ebuild_process, self._ebuild_exit)
3167
3168         def _ebuild_exit(self, ebuild_process):
3169
3170                 if self.phase == "install":
3171                         out = None
3172                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3173                         log_file = None
3174                         if self.background and log_path is not None:
3175                                 log_file = open(log_path, 'a')
3176                                 out = log_file
3177                         try:
3178                                 portage._check_build_log(self.settings, out=out)
3179                         finally:
3180                                 if log_file is not None:
3181                                         log_file.close()
3182
3183                 if self._default_exit(ebuild_process) != os.EX_OK:
3184                         self.wait()
3185                         return
3186
3187                 settings = self.settings
3188
3189                 if self.phase == "install":
3190                         portage._post_src_install_uid_fix(settings)
3191
3192                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3193                 if post_phase_cmds is not None:
3194                         post_phase = MiscFunctionsProcess(background=self.background,
3195                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3196                                 scheduler=self.scheduler, settings=settings)
3197                         self._start_task(post_phase, self._post_phase_exit)
3198                         return
3199
3200                 self.returncode = ebuild_process.returncode
3201                 self._current_task = None
3202                 self.wait()
3203
3204         def _post_phase_exit(self, post_phase):
3205                 if self._final_exit(post_phase) != os.EX_OK:
3206                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3207                                 noiselevel=-1)
3208                 self._current_task = None
3209                 self.wait()
3210                 return
3211
3212 class EbuildBinpkg(EbuildProcess):
3213         """
3214         This assumes that src_install() has successfully completed.
3215         """
3216         __slots__ = ("_binpkg_tmpfile",)
3217
3218         def _start(self):
3219                 self.phase = "package"
3220                 self.tree = "porttree"
3221                 pkg = self.pkg
3222                 root_config = pkg.root_config
3223                 portdb = root_config.trees["porttree"].dbapi
3224                 bintree = root_config.trees["bintree"]
3225                 ebuild_path = portdb.findname(self.pkg.cpv)
3226                 settings = self.settings
3227                 debug = settings.get("PORTAGE_DEBUG") == "1"
3228
3229                 bintree.prevent_collision(pkg.cpv)
3230                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3231                         pkg.cpv + ".tbz2." + str(os.getpid()))
3232                 self._binpkg_tmpfile = binpkg_tmpfile
3233                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3234                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3235
3236                 try:
3237                         EbuildProcess._start(self)
3238                 finally:
3239                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3240
3241         def _set_returncode(self, wait_retval):
3242                 EbuildProcess._set_returncode(self, wait_retval)
3243
3244                 pkg = self.pkg
3245                 bintree = pkg.root_config.trees["bintree"]
3246                 binpkg_tmpfile = self._binpkg_tmpfile
3247                 if self.returncode == os.EX_OK:
3248                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3249
3250 class EbuildMerge(SlotObject):
3251
3252         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3253                 "pkg", "pkg_count", "pkg_path", "pretend",
3254                 "scheduler", "settings", "tree", "world_atom")
3255
3256         def execute(self):
3257                 root_config = self.pkg.root_config
3258                 settings = self.settings
3259                 retval = portage.merge(settings["CATEGORY"],
3260                         settings["PF"], settings["D"],
3261                         os.path.join(settings["PORTAGE_BUILDDIR"],
3262                         "build-info"), root_config.root, settings,
3263                         myebuild=settings["EBUILD"],
3264                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3265                         vartree=root_config.trees["vartree"],
3266                         prev_mtimes=self.ldpath_mtimes,
3267                         scheduler=self.scheduler,
3268                         blockers=self.find_blockers)
3269
3270                 if retval == os.EX_OK:
3271                         self.world_atom(self.pkg)
3272                         self._log_success()
3273
3274                 return retval
3275
3276         def _log_success(self):
3277                 pkg = self.pkg
3278                 pkg_count = self.pkg_count
3279                 pkg_path = self.pkg_path
3280                 logger = self.logger
3281                 if "noclean" not in self.settings.features:
3282                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3283                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3284                         logger.log((" === (%s of %s) " + \
3285                                 "Post-Build Cleaning (%s::%s)") % \
3286                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3287                                 short_msg=short_msg)
3288                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3289                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3290
3291 class PackageUninstall(AsynchronousTask):
3292
3293         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3294
3295         def _start(self):
3296                 try:
3297                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3298                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3299                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3300                                 writemsg_level=self._writemsg_level)
3301                 except UninstallFailure, e:
3302                         self.returncode = e.status
3303                 else:
3304                         self.returncode = os.EX_OK
3305                 self.wait()
3306
3307         def _writemsg_level(self, msg, level=0, noiselevel=0):
3308
3309                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3310                 background = self.background
3311
3312                 if log_path is None:
3313                         if not (background and level < logging.WARNING):
3314                                 portage.util.writemsg_level(msg,
3315                                         level=level, noiselevel=noiselevel)
3316                 else:
3317                         if not background:
3318                                 portage.util.writemsg_level(msg,
3319                                         level=level, noiselevel=noiselevel)
3320
3321                         f = open(log_path, 'a')
3322                         try:
3323                                 f.write(msg)
3324                         finally:
3325                                 f.close()
3326
3327 class Binpkg(CompositeTask):
3328
3329         __slots__ = ("find_blockers",
3330                 "ldpath_mtimes", "logger", "opts",
3331                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3332                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3333                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3334
3335         def _writemsg_level(self, msg, level=0, noiselevel=0):
3336
3337                 if not self.background:
3338                         portage.util.writemsg_level(msg,
3339                                 level=level, noiselevel=noiselevel)
3340
3341                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3342                 if  log_path is not None:
3343                         f = open(log_path, 'a')
3344                         try:
3345                                 f.write(msg)
3346                         finally:
3347                                 f.close()
3348
3349         def _start(self):
3350
3351                 pkg = self.pkg
3352                 settings = self.settings
3353                 settings.setcpv(pkg)
3354                 self._tree = "bintree"
3355                 self._bintree = self.pkg.root_config.trees[self._tree]
3356                 self._verify = not self.opts.pretend
3357
3358                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3359                         "portage", pkg.category, pkg.pf)
3360                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3361                         pkg=pkg, settings=settings)
3362                 self._image_dir = os.path.join(dir_path, "image")
3363                 self._infloc = os.path.join(dir_path, "build-info")
3364                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3365                 settings["EBUILD"] = self._ebuild_path
3366                 debug = settings.get("PORTAGE_DEBUG") == "1"
3367                 portage.doebuild_environment(self._ebuild_path, "setup",
3368                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3369                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3370
3371                 # The prefetcher has already completed or it
3372                 # could be running now. If it's running now,
3373                 # wait for it to complete since it holds
3374                 # a lock on the file being fetched. The
3375                 # portage.locks functions are only designed
3376                 # to work between separate processes. Since
3377                 # the lock is held by the current process,
3378                 # use the scheduler and fetcher methods to
3379                 # synchronize with the fetcher.
3380                 prefetcher = self.prefetcher
3381                 if prefetcher is None:
3382                         pass
3383                 elif not prefetcher.isAlive():
3384                         prefetcher.cancel()
3385                 elif prefetcher.poll() is None:
3386
3387                         waiting_msg = ("Fetching '%s' " + \
3388                                 "in the background. " + \
3389                                 "To view fetch progress, run `tail -f " + \
3390                                 "/var/log/emerge-fetch.log` in another " + \
3391                                 "terminal.") % prefetcher.pkg_path
3392                         msg_prefix = colorize("GOOD", " * ")
3393                         from textwrap import wrap
3394                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3395                                 for line in wrap(waiting_msg, 65))
3396                         if not self.background:
3397                                 writemsg(waiting_msg, noiselevel=-1)
3398
3399                         self._current_task = prefetcher
3400                         prefetcher.addExitListener(self._prefetch_exit)
3401                         return
3402
3403                 self._prefetch_exit(prefetcher)
3404
3405         def _prefetch_exit(self, prefetcher):
3406
3407                 pkg = self.pkg
3408                 pkg_count = self.pkg_count
3409                 if not (self.opts.pretend or self.opts.fetchonly):
3410                         self._build_dir.lock()
3411                         try:
3412                                 shutil.rmtree(self._build_dir.dir_path)
3413                         except EnvironmentError, e:
3414                                 if e.errno != errno.ENOENT:
3415                                         raise
3416                                 del e
3417                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3418                 fetcher = BinpkgFetcher(background=self.background,
3419                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3420                         pretend=self.opts.pretend, scheduler=self.scheduler)
3421                 pkg_path = fetcher.pkg_path
3422                 self._pkg_path = pkg_path
3423
3424                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3425
3426                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3427                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3428                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3429                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3430                         self.logger.log(msg, short_msg=short_msg)
3431                         self._start_task(fetcher, self._fetcher_exit)
3432                         return
3433
3434                 self._fetcher_exit(fetcher)
3435
3436         def _fetcher_exit(self, fetcher):
3437
3438                 # The fetcher only has a returncode when
3439                 # --getbinpkg is enabled.
3440                 if fetcher.returncode is not None:
3441                         self._fetched_pkg = True
3442                         if self._default_exit(fetcher) != os.EX_OK:
3443                                 self._unlock_builddir()
3444                                 self.wait()
3445                                 return
3446
3447                 if self.opts.pretend:
3448                         self._current_task = None
3449                         self.returncode = os.EX_OK
3450                         self.wait()
3451                         return
3452
3453                 verifier = None
3454                 if self._verify:
3455                         logfile = None
3456                         if self.background:
3457                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3458                         verifier = BinpkgVerifier(background=self.background,
3459                                 logfile=logfile, pkg=self.pkg)
3460                         self._start_task(verifier, self._verifier_exit)
3461                         return
3462
3463                 self._verifier_exit(verifier)
3464
3465         def _verifier_exit(self, verifier):
3466                 if verifier is not None and \
3467                         self._default_exit(verifier) != os.EX_OK:
3468                         self._unlock_builddir()
3469                         self.wait()
3470                         return
3471
3472                 logger = self.logger
3473                 pkg = self.pkg
3474                 pkg_count = self.pkg_count
3475                 pkg_path = self._pkg_path
3476
3477                 if self._fetched_pkg:
3478                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3479
3480                 if self.opts.fetchonly:
3481                         self._current_task = None
3482                         self.returncode = os.EX_OK
3483                         self.wait()
3484                         return
3485
3486                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3487                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3488                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3489                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3490                 logger.log(msg, short_msg=short_msg)
3491
3492                 phase = "clean"
3493                 settings = self.settings
3494                 ebuild_phase = EbuildPhase(background=self.background,
3495                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3496                         settings=settings, tree=self._tree)
3497
3498                 self._start_task(ebuild_phase, self._clean_exit)
3499
3500         def _clean_exit(self, clean_phase):
3501                 if self._default_exit(clean_phase) != os.EX_OK:
3502                         self._unlock_builddir()
3503                         self.wait()
3504                         return
3505
3506                 dir_path = self._build_dir.dir_path
3507
3508                 try:
3509                         shutil.rmtree(dir_path)
3510                 except (IOError, OSError), e:
3511                         if e.errno != errno.ENOENT:
3512                                 raise
3513                         del e
3514
3515                 infloc = self._infloc
3516                 pkg = self.pkg
3517                 pkg_path = self._pkg_path
3518
3519                 dir_mode = 0755
3520                 for mydir in (dir_path, self._image_dir, infloc):
3521                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3522                                 gid=portage.data.portage_gid, mode=dir_mode)
3523
3524                 # This initializes PORTAGE_LOG_FILE.
3525                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3526                 self._writemsg_level(">>> Extracting info\n")
3527
3528                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3529                 check_missing_metadata = ("CATEGORY", "PF")
3530                 missing_metadata = set()
3531                 for k in check_missing_metadata:
3532                         v = pkg_xpak.getfile(k)
3533                         if not v:
3534                                 missing_metadata.add(k)
3535
3536                 pkg_xpak.unpackinfo(infloc)
3537                 for k in missing_metadata:
3538                         if k == "CATEGORY":
3539                                 v = pkg.category
3540                         elif k == "PF":
3541                                 v = pkg.pf
3542                         else:
3543                                 continue
3544
3545                         f = open(os.path.join(infloc, k), 'wb')
3546                         try:
3547                                 f.write(v + "\n")
3548                         finally:
3549                                 f.close()
3550
3551                 # Store the md5sum in the vdb.
3552                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3553                 try:
3554                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3555                 finally:
3556                         f.close()
3557
3558                 # This gives bashrc users an opportunity to do various things
3559                 # such as remove binary packages after they're installed.
3560                 settings = self.settings
3561                 settings.setcpv(self.pkg)
3562                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3563                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3564
3565                 phase = "setup"
3566                 setup_phase = EbuildPhase(background=self.background,
3567                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3568                         settings=settings, tree=self._tree)
3569
3570                 setup_phase.addExitListener(self._setup_exit)
3571                 self._current_task = setup_phase
3572                 self.scheduler.scheduleSetup(setup_phase)
3573
3574         def _setup_exit(self, setup_phase):
3575                 if self._default_exit(setup_phase) != os.EX_OK:
3576                         self._unlock_builddir()
3577                         self.wait()
3578                         return
3579
3580                 extractor = BinpkgExtractorAsync(background=self.background,
3581                         image_dir=self._image_dir,
3582                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3583                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3584                 self._start_task(extractor, self._extractor_exit)
3585
3586         def _extractor_exit(self, extractor):
3587                 if self._final_exit(extractor) != os.EX_OK:
3588                         self._unlock_builddir()
3589                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3590                                 noiselevel=-1)
3591                 self.wait()
3592
3593         def _unlock_builddir(self):
3594                 if self.opts.pretend or self.opts.fetchonly:
3595                         return
3596                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3597                 self._build_dir.unlock()
3598
3599         def install(self):
3600
3601                 # This gives bashrc users an opportunity to do various things
3602                 # such as remove binary packages after they're installed.
3603                 settings = self.settings
3604                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3605                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3606
3607                 merge = EbuildMerge(find_blockers=self.find_blockers,
3608                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3609                         pkg=self.pkg, pkg_count=self.pkg_count,
3610                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3611                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3612
3613                 try:
3614                         retval = merge.execute()
3615                 finally:
3616                         settings.pop("PORTAGE_BINPKG_FILE", None)
3617                         self._unlock_builddir()
3618                 return retval
3619
3620 class BinpkgFetcher(SpawnProcess):
3621
3622         __slots__ = ("pkg", "pretend",
3623                 "locked", "pkg_path", "_lock_obj")
3624
3625         def __init__(self, **kwargs):
3626                 SpawnProcess.__init__(self, **kwargs)
3627                 pkg = self.pkg
3628                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3629
3630         def _start(self):
3631
3632                 if self.cancelled:
3633                         return
3634
3635                 pkg = self.pkg
3636                 pretend = self.pretend
3637                 bintree = pkg.root_config.trees["bintree"]
3638                 settings = bintree.settings
3639                 use_locks = "distlocks" in settings.features
3640                 pkg_path = self.pkg_path
3641
3642                 if not pretend:
3643                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3644                         if use_locks:
3645                                 self.lock()
3646                 exists = os.path.exists(pkg_path)
3647                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3648                 if not (pretend or resume):
3649                         # Remove existing file or broken symlink.
3650                         try:
3651                                 os.unlink(pkg_path)
3652                         except OSError:
3653                                 pass
3654
3655                 # urljoin doesn't work correctly with
3656                 # unrecognized protocols like sftp
3657                 if bintree._remote_has_index:
3658                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3659                         if not rel_uri:
3660                                 rel_uri = pkg.cpv + ".tbz2"
3661                         uri = bintree._remote_base_uri.rstrip("/") + \
3662                                 "/" + rel_uri.lstrip("/")
3663                 else:
3664                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3665                                 "/" + pkg.pf + ".tbz2"
3666
3667                 if pretend:
3668                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3669                         self.returncode = os.EX_OK
3670                         self.wait()
3671                         return
3672
3673                 protocol = urlparse.urlparse(uri)[0]
3674                 fcmd_prefix = "FETCHCOMMAND"
3675                 if resume:
3676                         fcmd_prefix = "RESUMECOMMAND"
3677                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3678                 if not fcmd:
3679                         fcmd = settings.get(fcmd_prefix)
3680
3681                 fcmd_vars = {
3682                         "DISTDIR" : os.path.dirname(pkg_path),
3683                         "URI"     : uri,
3684                         "FILE"    : os.path.basename(pkg_path)
3685                 }
3686
3687                 fetch_env = dict(settings.iteritems())
3688                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3689                         for x in shlex.split(fcmd)]
3690
3691                 if self.fd_pipes is None:
3692                         self.fd_pipes = {}
3693                 fd_pipes = self.fd_pipes
3694
3695                 # Redirect all output to stdout since some fetchers like
3696                 # wget pollute stderr (if portage detects a problem then it
3697                 # can send it's own message to stderr).
3698                 fd_pipes.setdefault(0, sys.stdin.fileno())
3699                 fd_pipes.setdefault(1, sys.stdout.fileno())
3700                 fd_pipes.setdefault(2, sys.stdout.fileno())
3701
3702                 self.args = fetch_args
3703                 self.env = fetch_env
3704                 SpawnProcess._start(self)
3705
3706         def _set_returncode(self, wait_retval):
3707                 SpawnProcess._set_returncode(self, wait_retval)
3708                 if self.returncode == os.EX_OK:
3709                         # If possible, update the mtime to match the remote package if
3710                         # the fetcher didn't already do it automatically.
3711                         bintree = self.pkg.root_config.trees["bintree"]
3712                         if bintree._remote_has_index:
3713                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3714                                 if remote_mtime is not None:
3715                                         try:
3716                                                 remote_mtime = long(remote_mtime)
3717                                         except ValueError:
3718                                                 pass
3719                                         else:
3720                                                 try:
3721                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3722                                                 except OSError:
3723                                                         pass
3724                                                 else:
3725                                                         if remote_mtime != local_mtime:
3726                                                                 try:
3727                                                                         os.utime(self.pkg_path,
3728                                                                                 (remote_mtime, remote_mtime))
3729                                                                 except OSError:
3730                                                                         pass
3731
3732                 if self.locked:
3733                         self.unlock()
3734
3735         def lock(self):
3736                 """
3737                 This raises an AlreadyLocked exception if lock() is called
3738                 while a lock is already held. In order to avoid this, call
3739                 unlock() or check whether the "locked" attribute is True
3740                 or False before calling lock().
3741                 """
3742                 if self._lock_obj is not None:
3743                         raise self.AlreadyLocked((self._lock_obj,))
3744
3745                 self._lock_obj = portage.locks.lockfile(
3746                         self.pkg_path, wantnewlockfile=1)
3747                 self.locked = True
3748
3749         class AlreadyLocked(portage.exception.PortageException):
3750                 pass
3751
3752         def unlock(self):
3753                 if self._lock_obj is None:
3754                         return
3755                 portage.locks.unlockfile(self._lock_obj)
3756                 self._lock_obj = None
3757                 self.locked = False
3758
3759 class BinpkgVerifier(AsynchronousTask):
3760         __slots__ = ("logfile", "pkg",)
3761
3762         def _start(self):
3763                 """
3764                 Note: Unlike a normal AsynchronousTask.start() method,
3765                 this one does all work is synchronously. The returncode
3766                 attribute will be set before it returns.
3767                 """
3768
3769                 pkg = self.pkg
3770                 root_config = pkg.root_config
3771                 bintree = root_config.trees["bintree"]
3772                 rval = os.EX_OK
3773                 stdout_orig = sys.stdout
3774                 stderr_orig = sys.stderr
3775                 log_file = None
3776                 if self.background and self.logfile is not None:
3777                         log_file = open(self.logfile, 'a')
3778                 try:
3779                         if log_file is not None:
3780                                 sys.stdout = log_file
3781                                 sys.stderr = log_file
3782                         try:
3783                                 bintree.digestCheck(pkg)
3784                         except portage.exception.FileNotFound:
3785                                 writemsg("!!! Fetching Binary failed " + \
3786                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3787                                 rval = 1
3788                         except portage.exception.DigestException, e:
3789                                 writemsg("\n!!! Digest verification failed:\n",
3790                                         noiselevel=-1)
3791                                 writemsg("!!! %s\n" % e.value[0],
3792                                         noiselevel=-1)
3793                                 writemsg("!!! Reason: %s\n" % e.value[1],
3794                                         noiselevel=-1)
3795                                 writemsg("!!! Got: %s\n" % e.value[2],
3796                                         noiselevel=-1)
3797                                 writemsg("!!! Expected: %s\n" % e.value[3],
3798                                         noiselevel=-1)
3799                                 rval = 1
3800                         if rval != os.EX_OK:
3801                                 pkg_path = bintree.getname(pkg.cpv)
3802                                 head, tail = os.path.split(pkg_path)
3803                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3804                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3805                                         noiselevel=-1)
3806                 finally:
3807                         sys.stdout = stdout_orig
3808                         sys.stderr = stderr_orig
3809                         if log_file is not None:
3810                                 log_file.close()
3811
3812                 self.returncode = rval
3813                 self.wait()
3814
3815 class BinpkgPrefetcher(CompositeTask):
3816
3817         __slots__ = ("pkg",) + \
3818                 ("pkg_path", "_bintree",)
3819
3820         def _start(self):
3821                 self._bintree = self.pkg.root_config.trees["bintree"]
3822                 fetcher = BinpkgFetcher(background=self.background,
3823                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3824                         scheduler=self.scheduler)
3825                 self.pkg_path = fetcher.pkg_path
3826                 self._start_task(fetcher, self._fetcher_exit)
3827
3828         def _fetcher_exit(self, fetcher):
3829
3830                 if self._default_exit(fetcher) != os.EX_OK:
3831                         self.wait()
3832                         return
3833
3834                 verifier = BinpkgVerifier(background=self.background,
3835                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3836                 self._start_task(verifier, self._verifier_exit)
3837
3838         def _verifier_exit(self, verifier):
3839                 if self._default_exit(verifier) != os.EX_OK:
3840                         self.wait()
3841                         return
3842
3843                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3844
3845                 self._current_task = None
3846                 self.returncode = os.EX_OK
3847                 self.wait()
3848
3849 class BinpkgExtractorAsync(SpawnProcess):
3850
3851         __slots__ = ("image_dir", "pkg", "pkg_path")
3852
3853         _shell_binary = portage.const.BASH_BINARY
3854
3855         def _start(self):
3856                 self.args = [self._shell_binary, "-c",
3857                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3858                         (portage._shell_quote(self.pkg_path),
3859                         portage._shell_quote(self.image_dir))]
3860
3861                 self.env = self.pkg.root_config.settings.environ()
3862                 SpawnProcess._start(self)
3863
3864 class MergeListItem(CompositeTask):
3865
3866         """
3867         TODO: For parallel scheduling, everything here needs asynchronous
3868         execution support (start, poll, and wait methods).
3869         """
3870
3871         __slots__ = ("args_set",
3872                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3873                 "find_blockers", "logger", "mtimedb", "pkg",
3874                 "pkg_count", "pkg_to_replace", "prefetcher",
3875                 "settings", "statusMessage", "world_atom") + \
3876                 ("_install_task",)
3877
3878         def _start(self):
3879
3880                 pkg = self.pkg
3881                 build_opts = self.build_opts
3882
3883                 if pkg.installed:
3884                         # uninstall,  executed by self.merge()
3885                         self.returncode = os.EX_OK
3886                         self.wait()
3887                         return
3888
3889                 args_set = self.args_set
3890                 find_blockers = self.find_blockers
3891                 logger = self.logger
3892                 mtimedb = self.mtimedb
3893                 pkg_count = self.pkg_count
3894                 scheduler = self.scheduler
3895                 settings = self.settings
3896                 world_atom = self.world_atom
3897                 ldpath_mtimes = mtimedb["ldpath"]
3898
3899                 action_desc = "Emerging"
3900                 preposition = "for"
3901                 if pkg.type_name == "binary":
3902                         action_desc += " binary"
3903
3904                 if build_opts.fetchonly:
3905                         action_desc = "Fetching"
3906
3907                 msg = "%s (%s of %s) %s" % \
3908                         (action_desc,
3909                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3910                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3911                         colorize("GOOD", pkg.cpv))
3912
3913                 portdb = pkg.root_config.trees["porttree"].dbapi
3914                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3915                 if portdir_repo_name:
3916                         pkg_repo_name = pkg.metadata.get("repository")
3917                         if pkg_repo_name != portdir_repo_name:
3918                                 if not pkg_repo_name:
3919                                         pkg_repo_name = "unknown repo"
3920                                 msg += " from %s" % pkg_repo_name
3921
3922                 if pkg.root != "/":
3923                         msg += " %s %s" % (preposition, pkg.root)
3924
3925                 if not build_opts.pretend:
3926                         self.statusMessage(msg)
3927                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3928                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3929
3930                 if pkg.type_name == "ebuild":
3931
3932                         build = EbuildBuild(args_set=args_set,
3933                                 background=self.background,
3934                                 config_pool=self.config_pool,
3935                                 find_blockers=find_blockers,
3936                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3937                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3938                                 prefetcher=self.prefetcher, scheduler=scheduler,
3939                                 settings=settings, world_atom=world_atom)
3940
3941                         self._install_task = build
3942                         self._start_task(build, self._default_final_exit)
3943                         return
3944
3945                 elif pkg.type_name == "binary":
3946
3947                         binpkg = Binpkg(background=self.background,
3948                                 find_blockers=find_blockers,
3949                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3950                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3951                                 prefetcher=self.prefetcher, settings=settings,
3952                                 scheduler=scheduler, world_atom=world_atom)
3953
3954                         self._install_task = binpkg
3955                         self._start_task(binpkg, self._default_final_exit)
3956                         return
3957
3958         def _poll(self):
3959                 self._install_task.poll()
3960                 return self.returncode
3961
3962         def _wait(self):
3963                 self._install_task.wait()
3964                 return self.returncode
3965
3966         def merge(self):
3967
3968                 pkg = self.pkg
3969                 build_opts = self.build_opts
3970                 find_blockers = self.find_blockers
3971                 logger = self.logger
3972                 mtimedb = self.mtimedb
3973                 pkg_count = self.pkg_count
3974                 prefetcher = self.prefetcher
3975                 scheduler = self.scheduler
3976                 settings = self.settings
3977                 world_atom = self.world_atom
3978                 ldpath_mtimes = mtimedb["ldpath"]
3979
3980                 if pkg.installed:
3981                         if not (build_opts.buildpkgonly or \
3982                                 build_opts.fetchonly or build_opts.pretend):
3983
3984                                 uninstall = PackageUninstall(background=self.background,
3985                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3986                                         pkg=pkg, scheduler=scheduler, settings=settings)
3987
3988                                 uninstall.start()
3989                                 retval = uninstall.wait()
3990                                 if retval != os.EX_OK:
3991                                         return retval
3992                         return os.EX_OK
3993
3994                 if build_opts.fetchonly or \
3995                         build_opts.buildpkgonly:
3996                         return self.returncode
3997
3998                 retval = self._install_task.install()
3999                 return retval
4000
4001 class PackageMerge(AsynchronousTask):
4002         """
4003         TODO: Implement asynchronous merge so that the scheduler can
4004         run while a merge is executing.
4005         """
4006
4007         __slots__ = ("merge",)
4008
4009         def _start(self):
4010
4011                 pkg = self.merge.pkg
4012                 pkg_count = self.merge.pkg_count
4013
4014                 if pkg.installed:
4015                         action_desc = "Uninstalling"
4016                         preposition = "from"
4017                 else:
4018                         action_desc = "Installing"
4019                         preposition = "to"
4020
4021                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4022
4023                 if pkg.root != "/":
4024                         msg += " %s %s" % (preposition, pkg.root)
4025
4026                 if not self.merge.build_opts.fetchonly and \
4027                         not self.merge.build_opts.pretend and \
4028                         not self.merge.build_opts.buildpkgonly:
4029                         self.merge.statusMessage(msg)
4030
4031                 self.returncode = self.merge.merge()
4032                 self.wait()
4033
4034 class DependencyArg(object):
4035         def __init__(self, arg=None, root_config=None):
4036                 self.arg = arg
4037                 self.root_config = root_config
4038
4039         def __str__(self):
4040                 return str(self.arg)
4041
4042 class AtomArg(DependencyArg):
4043         def __init__(self, atom=None, **kwargs):
4044                 DependencyArg.__init__(self, **kwargs)
4045                 self.atom = atom
4046                 if not isinstance(self.atom, portage.dep.Atom):
4047                         self.atom = portage.dep.Atom(self.atom)
4048                 self.set = (self.atom, )
4049
4050 class PackageArg(DependencyArg):
4051         def __init__(self, package=None, **kwargs):
4052                 DependencyArg.__init__(self, **kwargs)
4053                 self.package = package
4054                 self.atom = portage.dep.Atom("=" + package.cpv)
4055                 self.set = (self.atom, )
4056
4057 class SetArg(DependencyArg):
4058         def __init__(self, set=None, **kwargs):
4059                 DependencyArg.__init__(self, **kwargs)
4060                 self.set = set
4061                 self.name = self.arg[len(SETPREFIX):]
4062
4063 class Dependency(SlotObject):
4064         __slots__ = ("atom", "blocker", "depth",
4065                 "parent", "onlydeps", "priority", "root")
4066         def __init__(self, **kwargs):
4067                 SlotObject.__init__(self, **kwargs)
4068                 if self.priority is None:
4069                         self.priority = DepPriority()
4070                 if self.depth is None:
4071                         self.depth = 0
4072
4073 class BlockerCache(portage.cache.mappings.MutableMapping):
4074         """This caches blockers of installed packages so that dep_check does not
4075         have to be done for every single installed package on every invocation of
4076         emerge.  The cache is invalidated whenever it is detected that something
4077         has changed that might alter the results of dep_check() calls:
4078                 1) the set of installed packages (including COUNTER) has changed
4079                 2) the old-style virtuals have changed
4080         """
4081
4082         # Number of uncached packages to trigger cache update, since
4083         # it's wasteful to update it for every vdb change.
4084         _cache_threshold = 5
4085
4086         class BlockerData(object):
4087
4088                 __slots__ = ("__weakref__", "atoms", "counter")
4089
4090                 def __init__(self, counter, atoms):
4091                         self.counter = counter
4092                         self.atoms = atoms
4093
4094         def __init__(self, myroot, vardb):
4095                 self._vardb = vardb
4096                 self._virtuals = vardb.settings.getvirtuals()
4097                 self._cache_filename = os.path.join(myroot,
4098                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4099                 self._cache_version = "1"
4100                 self._cache_data = None
4101                 self._modified = set()
4102                 self._load()
4103
4104         def _load(self):
4105                 try:
4106                         f = open(self._cache_filename, mode='rb')
4107                         mypickle = pickle.Unpickler(f)
4108                         try:
4109                                 mypickle.find_global = None
4110                         except AttributeError:
4111                                 # TODO: If py3k, override Unpickler.find_class().
4112                                 pass
4113                         self._cache_data = mypickle.load()
4114                         f.close()
4115                         del f
4116                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4117                         if isinstance(e, pickle.UnpicklingError):
4118                                 writemsg("!!! Error loading '%s': %s\n" % \
4119                                         (self._cache_filename, str(e)), noiselevel=-1)
4120                         del e
4121
4122                 cache_valid = self._cache_data and \
4123                         isinstance(self._cache_data, dict) and \
4124                         self._cache_data.get("version") == self._cache_version and \
4125                         isinstance(self._cache_data.get("blockers"), dict)
4126                 if cache_valid:
4127                         # Validate all the atoms and counters so that
4128                         # corruption is detected as soon as possible.
4129                         invalid_items = set()
4130                         for k, v in self._cache_data["blockers"].iteritems():
4131                                 if not isinstance(k, basestring):
4132                                         invalid_items.add(k)
4133                                         continue
4134                                 try:
4135                                         if portage.catpkgsplit(k) is None:
4136                                                 invalid_items.add(k)
4137                                                 continue
4138                                 except portage.exception.InvalidData:
4139                                         invalid_items.add(k)
4140                                         continue
4141                                 if not isinstance(v, tuple) or \
4142                                         len(v) != 2:
4143                                         invalid_items.add(k)
4144                                         continue
4145                                 counter, atoms = v
4146                                 if not isinstance(counter, (int, long)):
4147                                         invalid_items.add(k)
4148                                         continue
4149                                 if not isinstance(atoms, (list, tuple)):
4150                                         invalid_items.add(k)
4151                                         continue
4152                                 invalid_atom = False
4153                                 for atom in atoms:
4154                                         if not isinstance(atom, basestring):
4155                                                 invalid_atom = True
4156                                                 break
4157                                         if atom[:1] != "!" or \
4158                                                 not portage.isvalidatom(
4159                                                 atom, allow_blockers=True):
4160                                                 invalid_atom = True
4161                                                 break
4162                                 if invalid_atom:
4163                                         invalid_items.add(k)
4164                                         continue
4165
4166                         for k in invalid_items:
4167                                 del self._cache_data["blockers"][k]
4168                         if not self._cache_data["blockers"]:
4169                                 cache_valid = False
4170
4171                 if not cache_valid:
4172                         self._cache_data = {"version":self._cache_version}
4173                         self._cache_data["blockers"] = {}
4174                         self._cache_data["virtuals"] = self._virtuals
4175                 self._modified.clear()
4176
4177         def flush(self):
4178                 """If the current user has permission and the internal blocker cache
4179                 been updated, save it to disk and mark it unmodified.  This is called
4180                 by emerge after it has proccessed blockers for all installed packages.
4181                 Currently, the cache is only written if the user has superuser
4182                 privileges (since that's required to obtain a lock), but all users
4183                 have read access and benefit from faster blocker lookups (as long as
4184                 the entire cache is still valid).  The cache is stored as a pickled
4185                 dict object with the following format:
4186
4187                 {
4188                         version : "1",
4189                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4190                         "virtuals" : vardb.settings.getvirtuals()
4191                 }
4192                 """
4193                 if len(self._modified) >= self._cache_threshold and \
4194                         secpass >= 2:
4195                         try:
4196                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4197                                 pickle.dump(self._cache_data, f, -1)
4198                                 f.close()
4199                                 portage.util.apply_secpass_permissions(
4200                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4201                         except (IOError, OSError), e:
4202                                 pass
4203                         self._modified.clear()
4204
4205         def __setitem__(self, cpv, blocker_data):
4206                 """
4207                 Update the cache and mark it as modified for a future call to
4208                 self.flush().
4209
4210                 @param cpv: Package for which to cache blockers.
4211                 @type cpv: String
4212                 @param blocker_data: An object with counter and atoms attributes.
4213                 @type blocker_data: BlockerData
4214                 """
4215                 self._cache_data["blockers"][cpv] = \
4216                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4217                 self._modified.add(cpv)
4218
4219         def __iter__(self):
4220                 if self._cache_data is None:
4221                         # triggered by python-trace
4222                         return iter([])
4223                 return iter(self._cache_data["blockers"])
4224
4225         def __delitem__(self, cpv):
4226                 del self._cache_data["blockers"][cpv]
4227
4228         def __getitem__(self, cpv):
4229                 """
4230                 @rtype: BlockerData
4231                 @returns: An object with counter and atoms attributes.
4232                 """
4233                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4234
4235 class BlockerDB(object):
4236
4237         def __init__(self, root_config):
4238                 self._root_config = root_config
4239                 self._vartree = root_config.trees["vartree"]
4240                 self._portdb = root_config.trees["porttree"].dbapi
4241
4242                 self._dep_check_trees = None
4243                 self._fake_vartree = None
4244
4245         def _get_fake_vartree(self, acquire_lock=0):
4246                 fake_vartree = self._fake_vartree
4247                 if fake_vartree is None:
4248                         fake_vartree = FakeVartree(self._root_config,
4249                                 acquire_lock=acquire_lock)
4250                         self._fake_vartree = fake_vartree
4251                         self._dep_check_trees = { self._vartree.root : {
4252                                 "porttree"    :  fake_vartree,
4253                                 "vartree"     :  fake_vartree,
4254                         }}
4255                 else:
4256                         fake_vartree.sync(acquire_lock=acquire_lock)
4257                 return fake_vartree
4258
4259         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4260                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4261                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4262                 settings = self._vartree.settings
4263                 stale_cache = set(blocker_cache)
4264                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4265                 dep_check_trees = self._dep_check_trees
4266                 vardb = fake_vartree.dbapi
4267                 installed_pkgs = list(vardb)
4268
4269                 for inst_pkg in installed_pkgs:
4270                         stale_cache.discard(inst_pkg.cpv)
4271                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4272                         if cached_blockers is not None and \
4273                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4274                                 cached_blockers = None
4275                         if cached_blockers is not None:
4276                                 blocker_atoms = cached_blockers.atoms
4277                         else:
4278                                 # Use aux_get() to trigger FakeVartree global
4279                                 # updates on *DEPEND when appropriate.
4280                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4281                                 try:
4282                                         portage.dep._dep_check_strict = False
4283                                         success, atoms = portage.dep_check(depstr,
4284                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4285                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4286                                 finally:
4287                                         portage.dep._dep_check_strict = True
4288                                 if not success:
4289                                         pkg_location = os.path.join(inst_pkg.root,
4290                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4291                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4292                                                 (pkg_location, atoms), noiselevel=-1)
4293                                         continue
4294
4295                                 blocker_atoms = [atom for atom in atoms \
4296                                         if atom.startswith("!")]
4297                                 blocker_atoms.sort()
4298                                 counter = long(inst_pkg.metadata["COUNTER"])
4299                                 blocker_cache[inst_pkg.cpv] = \
4300                                         blocker_cache.BlockerData(counter, blocker_atoms)
4301                 for cpv in stale_cache:
4302                         del blocker_cache[cpv]
4303                 blocker_cache.flush()
4304
4305                 blocker_parents = digraph()
4306                 blocker_atoms = []
4307                 for pkg in installed_pkgs:
4308                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4309                                 blocker_atom = blocker_atom.lstrip("!")
4310                                 blocker_atoms.append(blocker_atom)
4311                                 blocker_parents.add(blocker_atom, pkg)
4312
4313                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4314                 blocking_pkgs = set()
4315                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4316                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4317
4318                 # Check for blockers in the other direction.
4319                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4320                 try:
4321                         portage.dep._dep_check_strict = False
4322                         success, atoms = portage.dep_check(depstr,
4323                                 vardb, settings, myuse=new_pkg.use.enabled,
4324                                 trees=dep_check_trees, myroot=new_pkg.root)
4325                 finally:
4326                         portage.dep._dep_check_strict = True
4327                 if not success:
4328                         # We should never get this far with invalid deps.
4329                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4330                         assert False
4331
4332                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4333                         if atom[:1] == "!"]
4334                 if blocker_atoms:
4335                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4336                         for inst_pkg in installed_pkgs:
4337                                 try:
4338                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4339                                 except (portage.exception.InvalidDependString, StopIteration):
4340                                         continue
4341                                 blocking_pkgs.add(inst_pkg)
4342
4343                 return blocking_pkgs
4344
4345 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4346
4347         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4348                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4349         p_type, p_root, p_key, p_status = parent_node
4350         msg = []
4351         if p_status == "nomerge":
4352                 category, pf = portage.catsplit(p_key)
4353                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4354                 msg.append("Portage is unable to process the dependencies of the ")
4355                 msg.append("'%s' package. " % p_key)
4356                 msg.append("In order to correct this problem, the package ")
4357                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4358                 msg.append("As a temporary workaround, the --nodeps option can ")
4359                 msg.append("be used to ignore all dependencies.  For reference, ")
4360                 msg.append("the problematic dependencies can be found in the ")
4361                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4362         else:
4363                 msg.append("This package can not be installed. ")
4364                 msg.append("Please notify the '%s' package maintainer " % p_key)
4365                 msg.append("about this problem.")
4366
4367         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4368         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4369
4370 class PackageVirtualDbapi(portage.dbapi):
4371         """
4372         A dbapi-like interface class that represents the state of the installed
4373         package database as new packages are installed, replacing any packages
4374         that previously existed in the same slot. The main difference between
4375         this class and fakedbapi is that this one uses Package instances
4376         internally (passed in via cpv_inject() and cpv_remove() calls).
4377         """
4378         def __init__(self, settings):
4379                 portage.dbapi.__init__(self)
4380                 self.settings = settings
4381                 self._match_cache = {}
4382                 self._cp_map = {}
4383                 self._cpv_map = {}
4384
4385         def clear(self):
4386                 """
4387                 Remove all packages.
4388                 """
4389                 if self._cpv_map:
4390                         self._clear_cache()
4391                         self._cp_map.clear()
4392                         self._cpv_map.clear()
4393
4394         def copy(self):
4395                 obj = PackageVirtualDbapi(self.settings)
4396                 obj._match_cache = self._match_cache.copy()
4397                 obj._cp_map = self._cp_map.copy()
4398                 for k, v in obj._cp_map.iteritems():
4399                         obj._cp_map[k] = v[:]
4400                 obj._cpv_map = self._cpv_map.copy()
4401                 return obj
4402
4403         def __iter__(self):
4404                 return self._cpv_map.itervalues()
4405
4406         def __contains__(self, item):
4407                 existing = self._cpv_map.get(item.cpv)
4408                 if existing is not None and \
4409                         existing == item:
4410                         return True
4411                 return False
4412
4413         def get(self, item, default=None):
4414                 cpv = getattr(item, "cpv", None)
4415                 if cpv is None:
4416                         if len(item) != 4:
4417                                 return default
4418                         type_name, root, cpv, operation = item
4419
4420                 existing = self._cpv_map.get(cpv)
4421                 if existing is not None and \
4422                         existing == item:
4423                         return existing
4424                 return default
4425
4426         def match_pkgs(self, atom):
4427                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4428
4429         def _clear_cache(self):
4430                 if self._categories is not None:
4431                         self._categories = None
4432                 if self._match_cache:
4433                         self._match_cache = {}
4434
4435         def match(self, origdep, use_cache=1):
4436                 result = self._match_cache.get(origdep)
4437                 if result is not None:
4438                         return result[:]
4439                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4440                 self._match_cache[origdep] = result
4441                 return result[:]
4442
4443         def cpv_exists(self, cpv):
4444                 return cpv in self._cpv_map
4445
4446         def cp_list(self, mycp, use_cache=1):
4447                 cachelist = self._match_cache.get(mycp)
4448                 # cp_list() doesn't expand old-style virtuals
4449                 if cachelist and cachelist[0].startswith(mycp):
4450                         return cachelist[:]
4451                 cpv_list = self._cp_map.get(mycp)
4452                 if cpv_list is None:
4453                         cpv_list = []
4454                 else:
4455                         cpv_list = [pkg.cpv for pkg in cpv_list]
4456                 self._cpv_sort_ascending(cpv_list)
4457                 if not (not cpv_list and mycp.startswith("virtual/")):
4458                         self._match_cache[mycp] = cpv_list
4459                 return cpv_list[:]
4460
4461         def cp_all(self):
4462                 return list(self._cp_map)
4463
4464         def cpv_all(self):
4465                 return list(self._cpv_map)
4466
4467         def cpv_inject(self, pkg):
4468                 cp_list = self._cp_map.get(pkg.cp)
4469                 if cp_list is None:
4470                         cp_list = []
4471                         self._cp_map[pkg.cp] = cp_list
4472                 e_pkg = self._cpv_map.get(pkg.cpv)
4473                 if e_pkg is not None:
4474                         if e_pkg == pkg:
4475                                 return
4476                         self.cpv_remove(e_pkg)
4477                 for e_pkg in cp_list:
4478                         if e_pkg.slot_atom == pkg.slot_atom:
4479                                 if e_pkg == pkg:
4480                                         return
4481                                 self.cpv_remove(e_pkg)
4482                                 break
4483                 cp_list.append(pkg)
4484                 self._cpv_map[pkg.cpv] = pkg
4485                 self._clear_cache()
4486
4487         def cpv_remove(self, pkg):
4488                 old_pkg = self._cpv_map.get(pkg.cpv)
4489                 if old_pkg != pkg:
4490                         raise KeyError(pkg)
4491                 self._cp_map[pkg.cp].remove(pkg)
4492                 del self._cpv_map[pkg.cpv]
4493                 self._clear_cache()
4494
4495         def aux_get(self, cpv, wants):
4496                 metadata = self._cpv_map[cpv].metadata
4497                 return [metadata.get(x, "") for x in wants]
4498
4499         def aux_update(self, cpv, values):
4500                 self._cpv_map[cpv].metadata.update(values)
4501                 self._clear_cache()
4502
4503 class depgraph(object):
4504
4505         pkg_tree_map = RootConfig.pkg_tree_map
4506
4507         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4508
4509         def __init__(self, settings, trees, myopts, myparams, spinner):
4510                 self.settings = settings
4511                 self.target_root = settings["ROOT"]
4512                 self.myopts = myopts
4513                 self.myparams = myparams
4514                 self.edebug = 0
4515                 if settings.get("PORTAGE_DEBUG", "") == "1":
4516                         self.edebug = 1
4517                 self.spinner = spinner
4518                 self._running_root = trees["/"]["root_config"]
4519                 self._opts_no_restart = Scheduler._opts_no_restart
4520                 self.pkgsettings = {}
4521                 # Maps slot atom to package for each Package added to the graph.
4522                 self._slot_pkg_map = {}
4523                 # Maps nodes to the reasons they were selected for reinstallation.
4524                 self._reinstall_nodes = {}
4525                 self.mydbapi = {}
4526                 self.trees = {}
4527                 self._trees_orig = trees
4528                 self.roots = {}
4529                 # Contains a filtered view of preferred packages that are selected
4530                 # from available repositories.
4531                 self._filtered_trees = {}
4532                 # Contains installed packages and new packages that have been added
4533                 # to the graph.
4534                 self._graph_trees = {}
4535                 # All Package instances
4536                 self._pkg_cache = {}
4537                 for myroot in trees:
4538                         self.trees[myroot] = {}
4539                         # Create a RootConfig instance that references
4540                         # the FakeVartree instead of the real one.
4541                         self.roots[myroot] = RootConfig(
4542                                 trees[myroot]["vartree"].settings,
4543                                 self.trees[myroot],
4544                                 trees[myroot]["root_config"].setconfig)
4545                         for tree in ("porttree", "bintree"):
4546                                 self.trees[myroot][tree] = trees[myroot][tree]
4547                         self.trees[myroot]["vartree"] = \
4548                                 FakeVartree(trees[myroot]["root_config"],
4549                                         pkg_cache=self._pkg_cache)
4550                         self.pkgsettings[myroot] = portage.config(
4551                                 clone=self.trees[myroot]["vartree"].settings)
4552                         self._slot_pkg_map[myroot] = {}
4553                         vardb = self.trees[myroot]["vartree"].dbapi
4554                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4555                                 "--buildpkgonly" not in self.myopts
4556                         # This fakedbapi instance will model the state that the vdb will
4557                         # have after new packages have been installed.
4558                         fakedb = PackageVirtualDbapi(vardb.settings)
4559                         if preload_installed_pkgs:
4560                                 for pkg in vardb:
4561                                         self.spinner.update()
4562                                         # This triggers metadata updates via FakeVartree.
4563                                         vardb.aux_get(pkg.cpv, [])
4564                                         fakedb.cpv_inject(pkg)
4565
4566                         # Now that the vardb state is cached in our FakeVartree,
4567                         # we won't be needing the real vartree cache for awhile.
4568                         # To make some room on the heap, clear the vardbapi
4569                         # caches.
4570                         trees[myroot]["vartree"].dbapi._clear_cache()
4571                         gc.collect()
4572
4573                         self.mydbapi[myroot] = fakedb
4574                         def graph_tree():
4575                                 pass
4576                         graph_tree.dbapi = fakedb
4577                         self._graph_trees[myroot] = {}
4578                         self._filtered_trees[myroot] = {}
4579                         # Substitute the graph tree for the vartree in dep_check() since we
4580                         # want atom selections to be consistent with package selections
4581                         # have already been made.
4582                         self._graph_trees[myroot]["porttree"]   = graph_tree
4583                         self._graph_trees[myroot]["vartree"]    = graph_tree
4584                         def filtered_tree():
4585                                 pass
4586                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4587                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4588
4589                         # Passing in graph_tree as the vartree here could lead to better
4590                         # atom selections in some cases by causing atoms for packages that
4591                         # have been added to the graph to be preferred over other choices.
4592                         # However, it can trigger atom selections that result in
4593                         # unresolvable direct circular dependencies. For example, this
4594                         # happens with gwydion-dylan which depends on either itself or
4595                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4596                         # gwydion-dylan-bin needs to be selected in order to avoid a
4597                         # an unresolvable direct circular dependency.
4598                         #
4599                         # To solve the problem described above, pass in "graph_db" so that
4600                         # packages that have been added to the graph are distinguishable
4601                         # from other available packages and installed packages. Also, pass
4602                         # the parent package into self._select_atoms() calls so that
4603                         # unresolvable direct circular dependencies can be detected and
4604                         # avoided when possible.
4605                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4606                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4607
4608                         dbs = []
4609                         portdb = self.trees[myroot]["porttree"].dbapi
4610                         bindb  = self.trees[myroot]["bintree"].dbapi
4611                         vardb  = self.trees[myroot]["vartree"].dbapi
4612                         #               (db, pkg_type, built, installed, db_keys)
4613                         if "--usepkgonly" not in self.myopts:
4614                                 db_keys = list(portdb._aux_cache_keys)
4615                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4616                         if "--usepkg" in self.myopts:
4617                                 db_keys = list(bindb._aux_cache_keys)
4618                                 dbs.append((bindb,  "binary", True, False, db_keys))
4619                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4620                         dbs.append((vardb, "installed", True, True, db_keys))
4621                         self._filtered_trees[myroot]["dbs"] = dbs
4622                         if "--usepkg" in self.myopts:
4623                                 self.trees[myroot]["bintree"].populate(
4624                                         "--getbinpkg" in self.myopts,
4625                                         "--getbinpkgonly" in self.myopts)
4626                 del trees
4627
4628                 self.digraph=portage.digraph()
4629                 # contains all sets added to the graph
4630                 self._sets = {}
4631                 # contains atoms given as arguments
4632                 self._sets["args"] = InternalPackageSet()
4633                 # contains all atoms from all sets added to the graph, including
4634                 # atoms given as arguments
4635                 self._set_atoms = InternalPackageSet()
4636                 self._atom_arg_map = {}
4637                 # contains all nodes pulled in by self._set_atoms
4638                 self._set_nodes = set()
4639                 # Contains only Blocker -> Uninstall edges
4640                 self._blocker_uninstalls = digraph()
4641                 # Contains only Package -> Blocker edges
4642                 self._blocker_parents = digraph()
4643                 # Contains only irrelevant Package -> Blocker edges
4644                 self._irrelevant_blockers = digraph()
4645                 # Contains only unsolvable Package -> Blocker edges
4646                 self._unsolvable_blockers = digraph()
4647                 # Contains all Blocker -> Blocked Package edges
4648                 self._blocked_pkgs = digraph()
4649                 # Contains world packages that have been protected from
4650                 # uninstallation but may not have been added to the graph
4651                 # if the graph is not complete yet.
4652                 self._blocked_world_pkgs = {}
4653                 self._slot_collision_info = {}
4654                 # Slot collision nodes are not allowed to block other packages since
4655                 # blocker validation is only able to account for one package per slot.
4656                 self._slot_collision_nodes = set()
4657                 self._parent_atoms = {}
4658                 self._slot_conflict_parent_atoms = set()
4659                 self._serialized_tasks_cache = None
4660                 self._scheduler_graph = None
4661                 self._displayed_list = None
4662                 self._pprovided_args = []
4663                 self._missing_args = []
4664                 self._masked_installed = set()
4665                 self._unsatisfied_deps_for_display = []
4666                 self._unsatisfied_blockers_for_display = None
4667                 self._circular_deps_for_display = None
4668                 self._dep_stack = []
4669                 self._unsatisfied_deps = []
4670                 self._initially_unsatisfied_deps = []
4671                 self._ignored_deps = []
4672                 self._required_set_names = set(["system", "world"])
4673                 self._select_atoms = self._select_atoms_highest_available
4674                 self._select_package = self._select_pkg_highest_available
4675                 self._highest_pkg_cache = {}
4676
4677         def _show_slot_collision_notice(self):
4678                 """Show an informational message advising the user to mask one of the
4679                 the packages. In some cases it may be possible to resolve this
4680                 automatically, but support for backtracking (removal nodes that have
4681                 already been selected) will be required in order to handle all possible
4682                 cases.
4683                 """
4684
4685                 if not self._slot_collision_info:
4686                         return
4687
4688                 self._show_merge_list()
4689
4690                 msg = []
4691                 msg.append("\n!!! Multiple package instances within a single " + \
4692                         "package slot have been pulled\n")
4693                 msg.append("!!! into the dependency graph, resulting" + \
4694                         " in a slot conflict:\n\n")
4695                 indent = "  "
4696                 # Max number of parents shown, to avoid flooding the display.
4697                 max_parents = 3
4698                 explanation_columns = 70
4699                 explanations = 0
4700                 for (slot_atom, root), slot_nodes \
4701                         in self._slot_collision_info.iteritems():
4702                         msg.append(str(slot_atom))
4703                         msg.append("\n\n")
4704
4705                         for node in slot_nodes:
4706                                 msg.append(indent)
4707                                 msg.append(str(node))
4708                                 parent_atoms = self._parent_atoms.get(node)
4709                                 if parent_atoms:
4710                                         pruned_list = set()
4711                                         # Prefer conflict atoms over others.
4712                                         for parent_atom in parent_atoms:
4713                                                 if len(pruned_list) >= max_parents:
4714                                                         break
4715                                                 if parent_atom in self._slot_conflict_parent_atoms:
4716                                                         pruned_list.add(parent_atom)
4717
4718                                         # If this package was pulled in by conflict atoms then
4719                                         # show those alone since those are the most interesting.
4720                                         if not pruned_list:
4721                                                 # When generating the pruned list, prefer instances
4722                                                 # of DependencyArg over instances of Package.
4723                                                 for parent_atom in parent_atoms:
4724                                                         if len(pruned_list) >= max_parents:
4725                                                                 break
4726                                                         parent, atom = parent_atom
4727                                                         if isinstance(parent, DependencyArg):
4728                                                                 pruned_list.add(parent_atom)
4729                                                 # Prefer Packages instances that themselves have been
4730                                                 # pulled into collision slots.
4731                                                 for parent_atom in parent_atoms:
4732                                                         if len(pruned_list) >= max_parents:
4733                                                                 break
4734                                                         parent, atom = parent_atom
4735                                                         if isinstance(parent, Package) and \
4736                                                                 (parent.slot_atom, parent.root) \
4737                                                                 in self._slot_collision_info:
4738                                                                 pruned_list.add(parent_atom)
4739                                                 for parent_atom in parent_atoms:
4740                                                         if len(pruned_list) >= max_parents:
4741                                                                 break
4742                                                         pruned_list.add(parent_atom)
4743                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4744                                         parent_atoms = pruned_list
4745                                         msg.append(" pulled in by\n")
4746                                         for parent_atom in parent_atoms:
4747                                                 parent, atom = parent_atom
4748                                                 msg.append(2*indent)
4749                                                 if isinstance(parent,
4750                                                         (PackageArg, AtomArg)):
4751                                                         # For PackageArg and AtomArg types, it's
4752                                                         # redundant to display the atom attribute.
4753                                                         msg.append(str(parent))
4754                                                 else:
4755                                                         # Display the specific atom from SetArg or
4756                                                         # Package types.
4757                                                         msg.append("%s required by %s" % (atom, parent))
4758                                                 msg.append("\n")
4759                                         if omitted_parents:
4760                                                 msg.append(2*indent)
4761                                                 msg.append("(and %d more)\n" % omitted_parents)
4762                                 else:
4763                                         msg.append(" (no parents)\n")
4764                                 msg.append("\n")
4765                         explanation = self._slot_conflict_explanation(slot_nodes)
4766                         if explanation:
4767                                 explanations += 1
4768                                 msg.append(indent + "Explanation:\n\n")
4769                                 for line in textwrap.wrap(explanation, explanation_columns):
4770                                         msg.append(2*indent + line + "\n")
4771                                 msg.append("\n")
4772                 msg.append("\n")
4773                 sys.stderr.write("".join(msg))
4774                 sys.stderr.flush()
4775
4776                 explanations_for_all = explanations == len(self._slot_collision_info)
4777
4778                 if explanations_for_all or "--quiet" in self.myopts:
4779                         return
4780
4781                 msg = []
4782                 msg.append("It may be possible to solve this problem ")
4783                 msg.append("by using package.mask to prevent one of ")
4784                 msg.append("those packages from being selected. ")
4785                 msg.append("However, it is also possible that conflicting ")
4786                 msg.append("dependencies exist such that they are impossible to ")
4787                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4788                 msg.append("the dependencies of two different packages, then those ")
4789                 msg.append("packages can not be installed simultaneously.")
4790
4791                 from formatter import AbstractFormatter, DumbWriter
4792                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4793                 for x in msg:
4794                         f.add_flowing_data(x)
4795                 f.end_paragraph(1)
4796
4797                 msg = []
4798                 msg.append("For more information, see MASKED PACKAGES ")
4799                 msg.append("section in the emerge man page or refer ")
4800                 msg.append("to the Gentoo Handbook.")
4801                 for x in msg:
4802                         f.add_flowing_data(x)
4803                 f.end_paragraph(1)
4804                 f.writer.flush()
4805
4806         def _slot_conflict_explanation(self, slot_nodes):
4807                 """
4808                 When a slot conflict occurs due to USE deps, there are a few
4809                 different cases to consider:
4810
4811                 1) New USE are correctly set but --newuse wasn't requested so an
4812                    installed package with incorrect USE happened to get pulled
4813                    into graph before the new one.
4814
4815                 2) New USE are incorrectly set but an installed package has correct
4816                    USE so it got pulled into the graph, and a new instance also got
4817                    pulled in due to --newuse or an upgrade.
4818
4819                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4820                    and multiple package instances got pulled into the same slot to
4821                    satisfy the conflicting deps.
4822
4823                 Currently, explanations and suggested courses of action are generated
4824                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4825                 """
4826
4827                 if len(slot_nodes) != 2:
4828                         # Suggestions are only implemented for
4829                         # conflicts between two packages.
4830                         return None
4831
4832                 all_conflict_atoms = self._slot_conflict_parent_atoms
4833                 matched_node = None
4834                 matched_atoms = None
4835                 unmatched_node = None
4836                 for node in slot_nodes:
4837                         parent_atoms = self._parent_atoms.get(node)
4838                         if not parent_atoms:
4839                                 # Normally, there are always parent atoms. If there are
4840                                 # none then something unexpected is happening and there's
4841                                 # currently no suggestion for this case.
4842                                 return None
4843                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4844                         for parent_atom in conflict_atoms:
4845                                 parent, atom = parent_atom
4846                                 if not atom.use:
4847                                         # Suggestions are currently only implemented for cases
4848                                         # in which all conflict atoms have USE deps.
4849                                         return None
4850                         if conflict_atoms:
4851                                 if matched_node is not None:
4852                                         # If conflict atoms match multiple nodes
4853                                         # then there's no suggestion.
4854                                         return None
4855                                 matched_node = node
4856                                 matched_atoms = conflict_atoms
4857                         else:
4858                                 if unmatched_node is not None:
4859                                         # Neither node is matched by conflict atoms, and
4860                                         # there is no suggestion for this case.
4861                                         return None
4862                                 unmatched_node = node
4863
4864                 if matched_node is None or unmatched_node is None:
4865                         # This shouldn't happen.
4866                         return None
4867
4868                 if unmatched_node.installed and not matched_node.installed:
4869                         return "New USE are correctly set, but --newuse wasn't" + \
4870                                 " requested, so an installed package with incorrect USE " + \
4871                                 "happened to get pulled into the dependency graph. " + \
4872                                 "In order to solve " + \
4873                                 "this, either specify the --newuse option or explicitly " + \
4874                                 " reinstall '%s'." % matched_node.slot_atom
4875
4876                 if matched_node.installed and not unmatched_node.installed:
4877                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4878                         explanation = ("New USE for '%s' are incorrectly set. " + \
4879                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4880                                 (matched_node.slot_atom, atoms[0])
4881                         if len(atoms) > 1:
4882                                 for atom in atoms[1:-1]:
4883                                         explanation += ", '%s'" % (atom,)
4884                                 if len(atoms) > 2:
4885                                         explanation += ","
4886                                 explanation += " and '%s'" % (atoms[-1],)
4887                         explanation += "."
4888                         return explanation
4889
4890                 return None
4891
4892         def _process_slot_conflicts(self):
4893                 """
4894                 Process slot conflict data to identify specific atoms which
4895                 lead to conflict. These atoms only match a subset of the
4896                 packages that have been pulled into a given slot.
4897                 """
4898                 for (slot_atom, root), slot_nodes \
4899                         in self._slot_collision_info.iteritems():
4900
4901                         all_parent_atoms = set()
4902                         for pkg in slot_nodes:
4903                                 parent_atoms = self._parent_atoms.get(pkg)
4904                                 if not parent_atoms:
4905                                         continue
4906                                 all_parent_atoms.update(parent_atoms)
4907
4908                         for pkg in slot_nodes:
4909                                 parent_atoms = self._parent_atoms.get(pkg)
4910                                 if parent_atoms is None:
4911                                         parent_atoms = set()
4912                                         self._parent_atoms[pkg] = parent_atoms
4913                                 for parent_atom in all_parent_atoms:
4914                                         if parent_atom in parent_atoms:
4915                                                 continue
4916                                         # Use package set for matching since it will match via
4917                                         # PROVIDE when necessary, while match_from_list does not.
4918                                         parent, atom = parent_atom
4919                                         atom_set = InternalPackageSet(
4920                                                 initial_atoms=(atom,))
4921                                         if atom_set.findAtomForPackage(pkg):
4922                                                 parent_atoms.add(parent_atom)
4923                                         else:
4924                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4925
4926         def _reinstall_for_flags(self, forced_flags,
4927                 orig_use, orig_iuse, cur_use, cur_iuse):
4928                 """Return a set of flags that trigger reinstallation, or None if there
4929                 are no such flags."""
4930                 if "--newuse" in self.myopts:
4931                         flags = set(orig_iuse.symmetric_difference(
4932                                 cur_iuse).difference(forced_flags))
4933                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4934                                 cur_iuse.intersection(cur_use)))
4935                         if flags:
4936                                 return flags
4937                 elif "changed-use" == self.myopts.get("--reinstall"):
4938                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4939                                 cur_iuse.intersection(cur_use))
4940                         if flags:
4941                                 return flags
4942                 return None
4943
4944         def _create_graph(self, allow_unsatisfied=False):
4945                 dep_stack = self._dep_stack
4946                 while dep_stack:
4947                         self.spinner.update()
4948                         dep = dep_stack.pop()
4949                         if isinstance(dep, Package):
4950                                 if not self._add_pkg_deps(dep,
4951                                         allow_unsatisfied=allow_unsatisfied):
4952                                         return 0
4953                                 continue
4954                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4955                                 return 0
4956                 return 1
4957
4958         def _add_dep(self, dep, allow_unsatisfied=False):
4959                 debug = "--debug" in self.myopts
4960                 buildpkgonly = "--buildpkgonly" in self.myopts
4961                 nodeps = "--nodeps" in self.myopts
4962                 empty = "empty" in self.myparams
4963                 deep = "deep" in self.myparams
4964                 update = "--update" in self.myopts and dep.depth <= 1
4965                 if dep.blocker:
4966                         if not buildpkgonly and \
4967                                 not nodeps and \
4968                                 dep.parent not in self._slot_collision_nodes:
4969                                 if dep.parent.onlydeps:
4970                                         # It's safe to ignore blockers if the
4971                                         # parent is an --onlydeps node.
4972                                         return 1
4973                                 # The blocker applies to the root where
4974                                 # the parent is or will be installed.
4975                                 blocker = Blocker(atom=dep.atom,
4976                                         eapi=dep.parent.metadata["EAPI"],
4977                                         root=dep.parent.root)
4978                                 self._blocker_parents.add(blocker, dep.parent)
4979                         return 1
4980                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4981                         onlydeps=dep.onlydeps)
4982                 if not dep_pkg:
4983                         if dep.priority.optional:
4984                                 # This could be an unecessary build-time dep
4985                                 # pulled in by --with-bdeps=y.
4986                                 return 1
4987                         if allow_unsatisfied:
4988                                 self._unsatisfied_deps.append(dep)
4989                                 return 1
4990                         self._unsatisfied_deps_for_display.append(
4991                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4992                         return 0
4993                 # In some cases, dep_check will return deps that shouldn't
4994                 # be proccessed any further, so they are identified and
4995                 # discarded here. Try to discard as few as possible since
4996                 # discarded dependencies reduce the amount of information
4997                 # available for optimization of merge order.
4998                 if dep.priority.satisfied and \
4999                         not dep_pkg.installed and \
5000                         not (existing_node or empty or deep or update):
5001                         myarg = None
5002                         if dep.root == self.target_root:
5003                                 try:
5004                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5005                                 except StopIteration:
5006                                         pass
5007                                 except portage.exception.InvalidDependString:
5008                                         if not dep_pkg.installed:
5009                                                 # This shouldn't happen since the package
5010                                                 # should have been masked.
5011                                                 raise
5012                         if not myarg:
5013                                 self._ignored_deps.append(dep)
5014                                 return 1
5015
5016                 if not self._add_pkg(dep_pkg, dep):
5017                         return 0
5018                 return 1
5019
5020         def _add_pkg(self, pkg, dep):
5021                 myparent = None
5022                 priority = None
5023                 depth = 0
5024                 if dep is None:
5025                         dep = Dependency()
5026                 else:
5027                         myparent = dep.parent
5028                         priority = dep.priority
5029                         depth = dep.depth
5030                 if priority is None:
5031                         priority = DepPriority()
5032                 """
5033                 Fills the digraph with nodes comprised of packages to merge.
5034                 mybigkey is the package spec of the package to merge.
5035                 myparent is the package depending on mybigkey ( or None )
5036                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5037                         Think --onlydeps, we need to ignore packages in that case.
5038                 #stuff to add:
5039                 #SLOT-aware emerge
5040                 #IUSE-aware emerge -> USE DEP aware depgraph
5041                 #"no downgrade" emerge
5042                 """
5043                 # Ensure that the dependencies of the same package
5044                 # are never processed more than once.
5045                 previously_added = pkg in self.digraph
5046
5047                 # select the correct /var database that we'll be checking against
5048                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5049                 pkgsettings = self.pkgsettings[pkg.root]
5050
5051                 arg_atoms = None
5052                 if True:
5053                         try:
5054                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5055                         except portage.exception.InvalidDependString, e:
5056                                 if not pkg.installed:
5057                                         show_invalid_depstring_notice(
5058                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5059                                         return 0
5060                                 del e
5061
5062                 if not pkg.onlydeps:
5063                         if not pkg.installed and \
5064                                 "empty" not in self.myparams and \
5065                                 vardbapi.match(pkg.slot_atom):
5066                                 # Increase the priority of dependencies on packages that
5067                                 # are being rebuilt. This optimizes merge order so that
5068                                 # dependencies are rebuilt/updated as soon as possible,
5069                                 # which is needed especially when emerge is called by
5070                                 # revdep-rebuild since dependencies may be affected by ABI
5071                                 # breakage that has rendered them useless. Don't adjust
5072                                 # priority here when in "empty" mode since all packages
5073                                 # are being merged in that case.
5074                                 priority.rebuild = True
5075
5076                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5077                         slot_collision = False
5078                         if existing_node:
5079                                 existing_node_matches = pkg.cpv == existing_node.cpv
5080                                 if existing_node_matches and \
5081                                         pkg != existing_node and \
5082                                         dep.atom is not None:
5083                                         # Use package set for matching since it will match via
5084                                         # PROVIDE when necessary, while match_from_list does not.
5085                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5086                                         if not atom_set.findAtomForPackage(existing_node):
5087                                                 existing_node_matches = False
5088                                 if existing_node_matches:
5089                                         # The existing node can be reused.
5090                                         if arg_atoms:
5091                                                 for parent_atom in arg_atoms:
5092                                                         parent, atom = parent_atom
5093                                                         self.digraph.add(existing_node, parent,
5094                                                                 priority=priority)
5095                                                         self._add_parent_atom(existing_node, parent_atom)
5096                                         # If a direct circular dependency is not an unsatisfied
5097                                         # buildtime dependency then drop it here since otherwise
5098                                         # it can skew the merge order calculation in an unwanted
5099                                         # way.
5100                                         if existing_node != myparent or \
5101                                                 (priority.buildtime and not priority.satisfied):
5102                                                 self.digraph.addnode(existing_node, myparent,
5103                                                         priority=priority)
5104                                                 if dep.atom is not None and dep.parent is not None:
5105                                                         self._add_parent_atom(existing_node,
5106                                                                 (dep.parent, dep.atom))
5107                                         return 1
5108                                 else:
5109
5110                                         # A slot collision has occurred.  Sometimes this coincides
5111                                         # with unresolvable blockers, so the slot collision will be
5112                                         # shown later if there are no unresolvable blockers.
5113                                         self._add_slot_conflict(pkg)
5114                                         slot_collision = True
5115
5116                         if slot_collision:
5117                                 # Now add this node to the graph so that self.display()
5118                                 # can show use flags and --tree portage.output.  This node is
5119                                 # only being partially added to the graph.  It must not be
5120                                 # allowed to interfere with the other nodes that have been
5121                                 # added.  Do not overwrite data for existing nodes in
5122                                 # self.mydbapi since that data will be used for blocker
5123                                 # validation.
5124                                 # Even though the graph is now invalid, continue to process
5125                                 # dependencies so that things like --fetchonly can still
5126                                 # function despite collisions.
5127                                 pass
5128                         elif not previously_added:
5129                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5130                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5131                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5132
5133                         if not pkg.installed:
5134                                 # Allow this package to satisfy old-style virtuals in case it
5135                                 # doesn't already. Any pre-existing providers will be preferred
5136                                 # over this one.
5137                                 try:
5138                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5139                                         # For consistency, also update the global virtuals.
5140                                         settings = self.roots[pkg.root].settings
5141                                         settings.unlock()
5142                                         settings.setinst(pkg.cpv, pkg.metadata)
5143                                         settings.lock()
5144                                 except portage.exception.InvalidDependString, e:
5145                                         show_invalid_depstring_notice(
5146                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5147                                         del e
5148                                         return 0
5149
5150                 if arg_atoms:
5151                         self._set_nodes.add(pkg)
5152
5153                 # Do this even when addme is False (--onlydeps) so that the
5154                 # parent/child relationship is always known in case
5155                 # self._show_slot_collision_notice() needs to be called later.
5156                 self.digraph.add(pkg, myparent, priority=priority)
5157                 if dep.atom is not None and dep.parent is not None:
5158                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5159
5160                 if arg_atoms:
5161                         for parent_atom in arg_atoms:
5162                                 parent, atom = parent_atom
5163                                 self.digraph.add(pkg, parent, priority=priority)
5164                                 self._add_parent_atom(pkg, parent_atom)
5165
5166                 """ This section determines whether we go deeper into dependencies or not.
5167                     We want to go deeper on a few occasions:
5168                     Installing package A, we need to make sure package A's deps are met.
5169                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5170                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5171                 """
5172                 dep_stack = self._dep_stack
5173                 if "recurse" not in self.myparams:
5174                         return 1
5175                 elif pkg.installed and \
5176                         "deep" not in self.myparams:
5177                         dep_stack = self._ignored_deps
5178
5179                 self.spinner.update()
5180
5181                 if arg_atoms:
5182                         depth = 0
5183                 pkg.depth = depth
5184                 if not previously_added:
5185                         dep_stack.append(pkg)
5186                 return 1
5187
5188         def _add_parent_atom(self, pkg, parent_atom):
5189                 parent_atoms = self._parent_atoms.get(pkg)
5190                 if parent_atoms is None:
5191                         parent_atoms = set()
5192                         self._parent_atoms[pkg] = parent_atoms
5193                 parent_atoms.add(parent_atom)
5194
5195         def _add_slot_conflict(self, pkg):
5196                 self._slot_collision_nodes.add(pkg)
5197                 slot_key = (pkg.slot_atom, pkg.root)
5198                 slot_nodes = self._slot_collision_info.get(slot_key)
5199                 if slot_nodes is None:
5200                         slot_nodes = set()
5201                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5202                         self._slot_collision_info[slot_key] = slot_nodes
5203                 slot_nodes.add(pkg)
5204
5205         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5206
5207                 mytype = pkg.type_name
5208                 myroot = pkg.root
5209                 mykey = pkg.cpv
5210                 metadata = pkg.metadata
5211                 myuse = pkg.use.enabled
5212                 jbigkey = pkg
5213                 depth = pkg.depth + 1
5214                 removal_action = "remove" in self.myparams
5215
5216                 edepend={}
5217                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5218                 for k in depkeys:
5219                         edepend[k] = metadata[k]
5220
5221                 if not pkg.built and \
5222                         "--buildpkgonly" in self.myopts and \
5223                         "deep" not in self.myparams and \
5224                         "empty" not in self.myparams:
5225                         edepend["RDEPEND"] = ""
5226                         edepend["PDEPEND"] = ""
5227                 bdeps_optional = False
5228
5229                 if pkg.built and not removal_action:
5230                         if self.myopts.get("--with-bdeps", "n") == "y":
5231                                 # Pull in build time deps as requested, but marked them as
5232                                 # "optional" since they are not strictly required. This allows
5233                                 # more freedom in the merge order calculation for solving
5234                                 # circular dependencies. Don't convert to PDEPEND since that
5235                                 # could make --with-bdeps=y less effective if it is used to
5236                                 # adjust merge order to prevent built_with_use() calls from
5237                                 # failing.
5238                                 bdeps_optional = True
5239                         else:
5240                                 # built packages do not have build time dependencies.
5241                                 edepend["DEPEND"] = ""
5242
5243                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5244                         edepend["DEPEND"] = ""
5245
5246                 deps = (
5247                         ("/", edepend["DEPEND"],
5248                                 self._priority(buildtime=(not bdeps_optional),
5249                                 optional=bdeps_optional)),
5250                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5251                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5252                 )
5253
5254                 debug = "--debug" in self.myopts
5255                 strict = mytype != "installed"
5256                 try:
5257                         for dep_root, dep_string, dep_priority in deps:
5258                                 if not dep_string:
5259                                         continue
5260                                 if debug:
5261                                         print
5262                                         print "Parent:   ", jbigkey
5263                                         print "Depstring:", dep_string
5264                                         print "Priority:", dep_priority
5265                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5266                                 try:
5267                                         selected_atoms = self._select_atoms(dep_root,
5268                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5269                                                 priority=dep_priority)
5270                                 except portage.exception.InvalidDependString, e:
5271                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5272                                         return 0
5273                                 if debug:
5274                                         print "Candidates:", selected_atoms
5275
5276                                 for atom in selected_atoms:
5277                                         try:
5278
5279                                                 atom = portage.dep.Atom(atom)
5280
5281                                                 mypriority = dep_priority.copy()
5282                                                 if not atom.blocker and vardb.match(atom):
5283                                                         mypriority.satisfied = True
5284
5285                                                 if not self._add_dep(Dependency(atom=atom,
5286                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5287                                                         priority=mypriority, root=dep_root),
5288                                                         allow_unsatisfied=allow_unsatisfied):
5289                                                         return 0
5290
5291                                         except portage.exception.InvalidAtom, e:
5292                                                 show_invalid_depstring_notice(
5293                                                         pkg, dep_string, str(e))
5294                                                 del e
5295                                                 if not pkg.installed:
5296                                                         return 0
5297
5298                                 if debug:
5299                                         print "Exiting...", jbigkey
5300                 except portage.exception.AmbiguousPackageName, e:
5301                         pkgs = e.args[0]
5302                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5303                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5304                         for cpv in pkgs:
5305                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5306                         portage.writemsg("\n", noiselevel=-1)
5307                         if mytype == "binary":
5308                                 portage.writemsg(
5309                                         "!!! This binary package cannot be installed: '%s'\n" % \
5310                                         mykey, noiselevel=-1)
5311                         elif mytype == "ebuild":
5312                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5313                                 myebuild, mylocation = portdb.findname2(mykey)
5314                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5315                                         "'%s'\n" % myebuild, noiselevel=-1)
5316                         portage.writemsg("!!! Please notify the package maintainer " + \
5317                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5318                         return 0
5319                 return 1
5320
5321         def _priority(self, **kwargs):
5322                 if "remove" in self.myparams:
5323                         priority_constructor = UnmergeDepPriority
5324                 else:
5325                         priority_constructor = DepPriority
5326                 return priority_constructor(**kwargs)
5327
5328         def _dep_expand(self, root_config, atom_without_category):
5329                 """
5330                 @param root_config: a root config instance
5331                 @type root_config: RootConfig
5332                 @param atom_without_category: an atom without a category component
5333                 @type atom_without_category: String
5334                 @rtype: list
5335                 @returns: a list of atoms containing categories (possibly empty)
5336                 """
5337                 null_cp = portage.dep_getkey(insert_category_into_atom(
5338                         atom_without_category, "null"))
5339                 cat, atom_pn = portage.catsplit(null_cp)
5340
5341                 dbs = self._filtered_trees[root_config.root]["dbs"]
5342                 categories = set()
5343                 for db, pkg_type, built, installed, db_keys in dbs:
5344                         for cat in db.categories:
5345                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5346                                         categories.add(cat)
5347
5348                 deps = []
5349                 for cat in categories:
5350                         deps.append(insert_category_into_atom(
5351                                 atom_without_category, cat))
5352                 return deps
5353
5354         def _have_new_virt(self, root, atom_cp):
5355                 ret = False
5356                 for db, pkg_type, built, installed, db_keys in \
5357                         self._filtered_trees[root]["dbs"]:
5358                         if db.cp_list(atom_cp):
5359                                 ret = True
5360                                 break
5361                 return ret
5362
5363         def _iter_atoms_for_pkg(self, pkg):
5364                 # TODO: add multiple $ROOT support
5365                 if pkg.root != self.target_root:
5366                         return
5367                 atom_arg_map = self._atom_arg_map
5368                 root_config = self.roots[pkg.root]
5369                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5370                         atom_cp = portage.dep_getkey(atom)
5371                         if atom_cp != pkg.cp and \
5372                                 self._have_new_virt(pkg.root, atom_cp):
5373                                 continue
5374                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5375                         visible_pkgs.reverse() # descending order
5376                         higher_slot = None
5377                         for visible_pkg in visible_pkgs:
5378                                 if visible_pkg.cp != atom_cp:
5379                                         continue
5380                                 if pkg >= visible_pkg:
5381                                         # This is descending order, and we're not
5382                                         # interested in any versions <= pkg given.
5383                                         break
5384                                 if pkg.slot_atom != visible_pkg.slot_atom:
5385                                         higher_slot = visible_pkg
5386                                         break
5387                         if higher_slot is not None:
5388                                 continue
5389                         for arg in atom_arg_map[(atom, pkg.root)]:
5390                                 if isinstance(arg, PackageArg) and \
5391                                         arg.package != pkg:
5392                                         continue
5393                                 yield arg, atom
5394
5395         def select_files(self, myfiles):
5396                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5397                 appropriate depgraph and return a favorite list."""
5398                 debug = "--debug" in self.myopts
5399                 root_config = self.roots[self.target_root]
5400                 sets = root_config.sets
5401                 getSetAtoms = root_config.setconfig.getSetAtoms
5402                 myfavorites=[]
5403                 myroot = self.target_root
5404                 dbs = self._filtered_trees[myroot]["dbs"]
5405                 vardb = self.trees[myroot]["vartree"].dbapi
5406                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5407                 portdb = self.trees[myroot]["porttree"].dbapi
5408                 bindb = self.trees[myroot]["bintree"].dbapi
5409                 pkgsettings = self.pkgsettings[myroot]
5410                 args = []
5411                 onlydeps = "--onlydeps" in self.myopts
5412                 lookup_owners = []
5413                 for x in myfiles:
5414                         ext = os.path.splitext(x)[1]
5415                         if ext==".tbz2":
5416                                 if not os.path.exists(x):
5417                                         if os.path.exists(
5418                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5419                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5420                                         elif os.path.exists(
5421                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5422                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5423                                         else:
5424                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5425                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5426                                                 return 0, myfavorites
5427                                 mytbz2=portage.xpak.tbz2(x)
5428                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5429                                 if os.path.realpath(x) != \
5430                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5431                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5432                                         return 0, myfavorites
5433                                 db_keys = list(bindb._aux_cache_keys)
5434                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5435                                 pkg = Package(type_name="binary", root_config=root_config,
5436                                         cpv=mykey, built=True, metadata=metadata,
5437                                         onlydeps=onlydeps)
5438                                 self._pkg_cache[pkg] = pkg
5439                                 args.append(PackageArg(arg=x, package=pkg,
5440                                         root_config=root_config))
5441                         elif ext==".ebuild":
5442                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5443                                 pkgdir = os.path.dirname(ebuild_path)
5444                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5445                                 cp = pkgdir[len(tree_root)+1:]
5446                                 e = portage.exception.PackageNotFound(
5447                                         ("%s is not in a valid portage tree " + \
5448                                         "hierarchy or does not exist") % x)
5449                                 if not portage.isvalidatom(cp):
5450                                         raise e
5451                                 cat = portage.catsplit(cp)[0]
5452                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5453                                 if not portage.isvalidatom("="+mykey):
5454                                         raise e
5455                                 ebuild_path = portdb.findname(mykey)
5456                                 if ebuild_path:
5457                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5458                                                 cp, os.path.basename(ebuild_path)):
5459                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5460                                                 return 0, myfavorites
5461                                         if mykey not in portdb.xmatch(
5462                                                 "match-visible", portage.dep_getkey(mykey)):
5463                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5464                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5465                                                 print colorize("BAD", "*** page for details.")
5466                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5467                                                         "Continuing...")
5468                                 else:
5469                                         raise portage.exception.PackageNotFound(
5470                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5471                                 db_keys = list(portdb._aux_cache_keys)
5472                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5473                                 pkg = Package(type_name="ebuild", root_config=root_config,
5474                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5475                                 pkgsettings.setcpv(pkg)
5476                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5477                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5478                                 self._pkg_cache[pkg] = pkg
5479                                 args.append(PackageArg(arg=x, package=pkg,
5480                                         root_config=root_config))
5481                         elif x.startswith(os.path.sep):
5482                                 if not x.startswith(myroot):
5483                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5484                                                 " $ROOT.\n") % x, noiselevel=-1)
5485                                         return 0, []
5486                                 # Queue these up since it's most efficient to handle
5487                                 # multiple files in a single iter_owners() call.
5488                                 lookup_owners.append(x)
5489                         else:
5490                                 if x in ("system", "world"):
5491                                         x = SETPREFIX + x
5492                                 if x.startswith(SETPREFIX):
5493                                         s = x[len(SETPREFIX):]
5494                                         if s not in sets:
5495                                                 raise portage.exception.PackageSetNotFound(s)
5496                                         if s in self._sets:
5497                                                 continue
5498                                         # Recursively expand sets so that containment tests in
5499                                         # self._get_parent_sets() properly match atoms in nested
5500                                         # sets (like if world contains system).
5501                                         expanded_set = InternalPackageSet(
5502                                                 initial_atoms=getSetAtoms(s))
5503                                         self._sets[s] = expanded_set
5504                                         args.append(SetArg(arg=x, set=expanded_set,
5505                                                 root_config=root_config))
5506                                         continue
5507                                 if not is_valid_package_atom(x):
5508                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5509                                                 noiselevel=-1)
5510                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5511                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5512                                         return (0,[])
5513                                 # Don't expand categories or old-style virtuals here unless
5514                                 # necessary. Expansion of old-style virtuals here causes at
5515                                 # least the following problems:
5516                                 #   1) It's more difficult to determine which set(s) an atom
5517                                 #      came from, if any.
5518                                 #   2) It takes away freedom from the resolver to choose other
5519                                 #      possible expansions when necessary.
5520                                 if "/" in x:
5521                                         args.append(AtomArg(arg=x, atom=x,
5522                                                 root_config=root_config))
5523                                         continue
5524                                 expanded_atoms = self._dep_expand(root_config, x)
5525                                 installed_cp_set = set()
5526                                 for atom in expanded_atoms:
5527                                         atom_cp = portage.dep_getkey(atom)
5528                                         if vardb.cp_list(atom_cp):
5529                                                 installed_cp_set.add(atom_cp)
5530                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5531                                         installed_cp = iter(installed_cp_set).next()
5532                                         expanded_atoms = [atom for atom in expanded_atoms \
5533                                                 if portage.dep_getkey(atom) == installed_cp]
5534
5535                                 if len(expanded_atoms) > 1:
5536                                         print
5537                                         print
5538                                         ambiguous_package_name(x, expanded_atoms, root_config,
5539                                                 self.spinner, self.myopts)
5540                                         return False, myfavorites
5541                                 if expanded_atoms:
5542                                         atom = expanded_atoms[0]
5543                                 else:
5544                                         null_atom = insert_category_into_atom(x, "null")
5545                                         null_cp = portage.dep_getkey(null_atom)
5546                                         cat, atom_pn = portage.catsplit(null_cp)
5547                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5548                                         if virts_p:
5549                                                 # Allow the depgraph to choose which virtual.
5550                                                 atom = insert_category_into_atom(x, "virtual")
5551                                         else:
5552                                                 atom = insert_category_into_atom(x, "null")
5553
5554                                 args.append(AtomArg(arg=x, atom=atom,
5555                                         root_config=root_config))
5556
5557                 if lookup_owners:
5558                         relative_paths = []
5559                         search_for_multiple = False
5560                         if len(lookup_owners) > 1:
5561                                 search_for_multiple = True
5562
5563                         for x in lookup_owners:
5564                                 if not search_for_multiple and os.path.isdir(x):
5565                                         search_for_multiple = True
5566                                 relative_paths.append(x[len(myroot):])
5567
5568                         owners = set()
5569                         for pkg, relative_path in \
5570                                 real_vardb._owners.iter_owners(relative_paths):
5571                                 owners.add(pkg.mycpv)
5572                                 if not search_for_multiple:
5573                                         break
5574
5575                         if not owners:
5576                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5577                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5578                                 return 0, []
5579
5580                         for cpv in owners:
5581                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5582                                 if not slot:
5583                                         # portage now masks packages with missing slot, but it's
5584                                         # possible that one was installed by an older version
5585                                         atom = portage.cpv_getkey(cpv)
5586                                 else:
5587                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5588                                 args.append(AtomArg(arg=atom, atom=atom,
5589                                         root_config=root_config))
5590
5591                 if "--update" in self.myopts:
5592                         # In some cases, the greedy slots behavior can pull in a slot that
5593                         # the user would want to uninstall due to it being blocked by a
5594                         # newer version in a different slot. Therefore, it's necessary to
5595                         # detect and discard any that should be uninstalled. Each time
5596                         # that arguments are updated, package selections are repeated in
5597                         # order to ensure consistency with the current arguments:
5598                         #
5599                         #  1) Initialize args
5600                         #  2) Select packages and generate initial greedy atoms
5601                         #  3) Update args with greedy atoms
5602                         #  4) Select packages and generate greedy atoms again, while
5603                         #     accounting for any blockers between selected packages
5604                         #  5) Update args with revised greedy atoms
5605
5606                         self._set_args(args)
5607                         greedy_args = []
5608                         for arg in args:
5609                                 greedy_args.append(arg)
5610                                 if not isinstance(arg, AtomArg):
5611                                         continue
5612                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5613                                         greedy_args.append(
5614                                                 AtomArg(arg=arg.arg, atom=atom,
5615                                                         root_config=arg.root_config))
5616
5617                         self._set_args(greedy_args)
5618                         del greedy_args
5619
5620                         # Revise greedy atoms, accounting for any blockers
5621                         # between selected packages.
5622                         revised_greedy_args = []
5623                         for arg in args:
5624                                 revised_greedy_args.append(arg)
5625                                 if not isinstance(arg, AtomArg):
5626                                         continue
5627                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5628                                         blocker_lookahead=True):
5629                                         revised_greedy_args.append(
5630                                                 AtomArg(arg=arg.arg, atom=atom,
5631                                                         root_config=arg.root_config))
5632                         args = revised_greedy_args
5633                         del revised_greedy_args
5634
5635                 self._set_args(args)
5636
5637                 myfavorites = set(myfavorites)
5638                 for arg in args:
5639                         if isinstance(arg, (AtomArg, PackageArg)):
5640                                 myfavorites.add(arg.atom)
5641                         elif isinstance(arg, SetArg):
5642                                 myfavorites.add(arg.arg)
5643                 myfavorites = list(myfavorites)
5644
5645                 pprovideddict = pkgsettings.pprovideddict
5646                 if debug:
5647                         portage.writemsg("\n", noiselevel=-1)
5648                 # Order needs to be preserved since a feature of --nodeps
5649                 # is to allow the user to force a specific merge order.
5650                 args.reverse()
5651                 while args:
5652                         arg = args.pop()
5653                         for atom in arg.set:
5654                                 self.spinner.update()
5655                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5656                                         root=myroot, parent=arg)
5657                                 atom_cp = portage.dep_getkey(atom)
5658                                 try:
5659                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5660                                         if pprovided and portage.match_from_list(atom, pprovided):
5661                                                 # A provided package has been specified on the command line.
5662                                                 self._pprovided_args.append((arg, atom))
5663                                                 continue
5664                                         if isinstance(arg, PackageArg):
5665                                                 if not self._add_pkg(arg.package, dep) or \
5666                                                         not self._create_graph():
5667                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5668                                                                 "dependencies for %s\n") % arg.arg)
5669                                                         return 0, myfavorites
5670                                                 continue
5671                                         if debug:
5672                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5673                                                         (arg, atom), noiselevel=-1)
5674                                         pkg, existing_node = self._select_package(
5675                                                 myroot, atom, onlydeps=onlydeps)
5676                                         if not pkg:
5677                                                 if not (isinstance(arg, SetArg) and \
5678                                                         arg.name in ("system", "world")):
5679                                                         self._unsatisfied_deps_for_display.append(
5680                                                                 ((myroot, atom), {}))
5681                                                         return 0, myfavorites
5682                                                 self._missing_args.append((arg, atom))
5683                                                 continue
5684                                         if atom_cp != pkg.cp:
5685                                                 # For old-style virtuals, we need to repeat the
5686                                                 # package.provided check against the selected package.
5687                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5688                                                 pprovided = pprovideddict.get(pkg.cp)
5689                                                 if pprovided and \
5690                                                         portage.match_from_list(expanded_atom, pprovided):
5691                                                         # A provided package has been
5692                                                         # specified on the command line.
5693                                                         self._pprovided_args.append((arg, atom))
5694                                                         continue
5695                                         if pkg.installed and "selective" not in self.myparams:
5696                                                 self._unsatisfied_deps_for_display.append(
5697                                                         ((myroot, atom), {}))
5698                                                 # Previous behavior was to bail out in this case, but
5699                                                 # since the dep is satisfied by the installed package,
5700                                                 # it's more friendly to continue building the graph
5701                                                 # and just show a warning message. Therefore, only bail
5702                                                 # out here if the atom is not from either the system or
5703                                                 # world set.
5704                                                 if not (isinstance(arg, SetArg) and \
5705                                                         arg.name in ("system", "world")):
5706                                                         return 0, myfavorites
5707
5708                                         # Add the selected package to the graph as soon as possible
5709                                         # so that later dep_check() calls can use it as feedback
5710                                         # for making more consistent atom selections.
5711                                         if not self._add_pkg(pkg, dep):
5712                                                 if isinstance(arg, SetArg):
5713                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5714                                                                 "dependencies for %s from %s\n") % \
5715                                                                 (atom, arg.arg))
5716                                                 else:
5717                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5718                                                                 "dependencies for %s\n") % atom)
5719                                                 return 0, myfavorites
5720
5721                                 except portage.exception.MissingSignature, e:
5722                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5723                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5724                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5725                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5726                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5727                                         return 0, myfavorites
5728                                 except portage.exception.InvalidSignature, e:
5729                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5730                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5731                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5732                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5733                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5734                                         return 0, myfavorites
5735                                 except SystemExit, e:
5736                                         raise # Needed else can't exit
5737                                 except Exception, e:
5738                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5739                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5740                                         raise
5741
5742                 # Now that the root packages have been added to the graph,
5743                 # process the dependencies.
5744                 if not self._create_graph():
5745                         return 0, myfavorites
5746
5747                 missing=0
5748                 if "--usepkgonly" in self.myopts:
5749                         for xs in self.digraph.all_nodes():
5750                                 if not isinstance(xs, Package):
5751                                         continue
5752                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5753                                         if missing == 0:
5754                                                 print
5755                                         missing += 1
5756                                         print "Missing binary for:",xs[2]
5757
5758                 try:
5759                         self.altlist()
5760                 except self._unknown_internal_error:
5761                         return False, myfavorites
5762
5763                 # We're true here unless we are missing binaries.
5764                 return (not missing,myfavorites)
5765
5766         def _set_args(self, args):
5767                 """
5768                 Create the "args" package set from atoms and packages given as
5769                 arguments. This method can be called multiple times if necessary.
5770                 The package selection cache is automatically invalidated, since
5771                 arguments influence package selections.
5772                 """
5773                 args_set = self._sets["args"]
5774                 args_set.clear()
5775                 for arg in args:
5776                         if not isinstance(arg, (AtomArg, PackageArg)):
5777                                 continue
5778                         atom = arg.atom
5779                         if atom in args_set:
5780                                 continue
5781                         args_set.add(atom)
5782
5783                 self._set_atoms.clear()
5784                 self._set_atoms.update(chain(*self._sets.itervalues()))
5785                 atom_arg_map = self._atom_arg_map
5786                 atom_arg_map.clear()
5787                 for arg in args:
5788                         for atom in arg.set:
5789                                 atom_key = (atom, arg.root_config.root)
5790                                 refs = atom_arg_map.get(atom_key)
5791                                 if refs is None:
5792                                         refs = []
5793                                         atom_arg_map[atom_key] = refs
5794                                         if arg not in refs:
5795                                                 refs.append(arg)
5796
5797                 # Invalidate the package selection cache, since
5798                 # arguments influence package selections.
5799                 self._highest_pkg_cache.clear()
5800                 for trees in self._filtered_trees.itervalues():
5801                         trees["porttree"].dbapi._clear_cache()
5802
5803         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5804                 """
5805                 Return a list of slot atoms corresponding to installed slots that
5806                 differ from the slot of the highest visible match. When
5807                 blocker_lookahead is True, slot atoms that would trigger a blocker
5808                 conflict are automatically discarded, potentially allowing automatic
5809                 uninstallation of older slots when appropriate.
5810                 """
5811                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5812                 if highest_pkg is None:
5813                         return []
5814                 vardb = root_config.trees["vartree"].dbapi
5815                 slots = set()
5816                 for cpv in vardb.match(atom):
5817                         # don't mix new virtuals with old virtuals
5818                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5819                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5820
5821                 slots.add(highest_pkg.metadata["SLOT"])
5822                 if len(slots) == 1:
5823                         return []
5824                 greedy_pkgs = []
5825                 slots.remove(highest_pkg.metadata["SLOT"])
5826                 while slots:
5827                         slot = slots.pop()
5828                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5829                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5830                         if pkg is not None and \
5831                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5832                                 greedy_pkgs.append(pkg)
5833                 if not greedy_pkgs:
5834                         return []
5835                 if not blocker_lookahead:
5836                         return [pkg.slot_atom for pkg in greedy_pkgs]
5837
5838                 blockers = {}
5839                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5840                 for pkg in greedy_pkgs + [highest_pkg]:
5841                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5842                         try:
5843                                 atoms = self._select_atoms(
5844                                         pkg.root, dep_str, pkg.use.enabled,
5845                                         parent=pkg, strict=True)
5846                         except portage.exception.InvalidDependString:
5847                                 continue
5848                         blocker_atoms = (x for x in atoms if x.blocker)
5849                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5850
5851                 if highest_pkg not in blockers:
5852                         return []
5853
5854                 # filter packages with invalid deps
5855                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5856
5857                 # filter packages that conflict with highest_pkg
5858                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5859                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5860                         blockers[pkg].findAtomForPackage(highest_pkg))]
5861
5862                 if not greedy_pkgs:
5863                         return []
5864
5865                 # If two packages conflict, discard the lower version.
5866                 discard_pkgs = set()
5867                 greedy_pkgs.sort(reverse=True)
5868                 for i in xrange(len(greedy_pkgs) - 1):
5869                         pkg1 = greedy_pkgs[i]
5870                         if pkg1 in discard_pkgs:
5871                                 continue
5872                         for j in xrange(i + 1, len(greedy_pkgs)):
5873                                 pkg2 = greedy_pkgs[j]
5874                                 if pkg2 in discard_pkgs:
5875                                         continue
5876                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5877                                         blockers[pkg2].findAtomForPackage(pkg1):
5878                                         # pkg1 > pkg2
5879                                         discard_pkgs.add(pkg2)
5880
5881                 return [pkg.slot_atom for pkg in greedy_pkgs \
5882                         if pkg not in discard_pkgs]
5883
5884         def _select_atoms_from_graph(self, *pargs, **kwargs):
5885                 """
5886                 Prefer atoms matching packages that have already been
5887                 added to the graph or those that are installed and have
5888                 not been scheduled for replacement.
5889                 """
5890                 kwargs["trees"] = self._graph_trees
5891                 return self._select_atoms_highest_available(*pargs, **kwargs)
5892
5893         def _select_atoms_highest_available(self, root, depstring,
5894                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5895                 """This will raise InvalidDependString if necessary. If trees is
5896                 None then self._filtered_trees is used."""
5897                 pkgsettings = self.pkgsettings[root]
5898                 if trees is None:
5899                         trees = self._filtered_trees
5900                 if not getattr(priority, "buildtime", False):
5901                         # The parent should only be passed to dep_check() for buildtime
5902                         # dependencies since that's the only case when it's appropriate
5903                         # to trigger the circular dependency avoidance code which uses it.
5904                         # It's important not to trigger the same circular dependency
5905                         # avoidance code for runtime dependencies since it's not needed
5906                         # and it can promote an incorrect package choice.
5907                         parent = None
5908                 if True:
5909                         try:
5910                                 if parent is not None:
5911                                         trees[root]["parent"] = parent
5912                                 if not strict:
5913                                         portage.dep._dep_check_strict = False
5914                                 mycheck = portage.dep_check(depstring, None,
5915                                         pkgsettings, myuse=myuse,
5916                                         myroot=root, trees=trees)
5917                         finally:
5918                                 if parent is not None:
5919                                         trees[root].pop("parent")
5920                                 portage.dep._dep_check_strict = True
5921                         if not mycheck[0]:
5922                                 raise portage.exception.InvalidDependString(mycheck[1])
5923                         selected_atoms = mycheck[1]
5924                 return selected_atoms
5925
5926         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5927                 atom = portage.dep.Atom(atom)
5928                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5929                 atom_without_use = atom
5930                 if atom.use:
5931                         atom_without_use = portage.dep.remove_slot(atom)
5932                         if atom.slot:
5933                                 atom_without_use += ":" + atom.slot
5934                         atom_without_use = portage.dep.Atom(atom_without_use)
5935                 xinfo = '"%s"' % atom
5936                 if arg:
5937                         xinfo='"%s"' % arg
5938                 # Discard null/ from failed cpv_expand category expansion.
5939                 xinfo = xinfo.replace("null/", "")
5940                 masked_packages = []
5941                 missing_use = []
5942                 masked_pkg_instances = set()
5943                 missing_licenses = []
5944                 have_eapi_mask = False
5945                 pkgsettings = self.pkgsettings[root]
5946                 implicit_iuse = pkgsettings._get_implicit_iuse()
5947                 root_config = self.roots[root]
5948                 portdb = self.roots[root].trees["porttree"].dbapi
5949                 dbs = self._filtered_trees[root]["dbs"]
5950                 for db, pkg_type, built, installed, db_keys in dbs:
5951                         if installed:
5952                                 continue
5953                         match = db.match
5954                         if hasattr(db, "xmatch"):
5955                                 cpv_list = db.xmatch("match-all", atom_without_use)
5956                         else:
5957                                 cpv_list = db.match(atom_without_use)
5958                         # descending order
5959                         cpv_list.reverse()
5960                         for cpv in cpv_list:
5961                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5962                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5963                                 if metadata is not None:
5964                                         pkg = Package(built=built, cpv=cpv,
5965                                                 installed=installed, metadata=metadata,
5966                                                 root_config=root_config)
5967                                         if pkg.cp != atom.cp:
5968                                                 # A cpv can be returned from dbapi.match() as an
5969                                                 # old-style virtual match even in cases when the
5970                                                 # package does not actually PROVIDE the virtual.
5971                                                 # Filter out any such false matches here.
5972                                                 if not atom_set.findAtomForPackage(pkg):
5973                                                         continue
5974                                         if mreasons:
5975                                                 masked_pkg_instances.add(pkg)
5976                                         if atom.use:
5977                                                 missing_use.append(pkg)
5978                                                 if not mreasons:
5979                                                         continue
5980                                 masked_packages.append(
5981                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5982
5983                 missing_use_reasons = []
5984                 missing_iuse_reasons = []
5985                 for pkg in missing_use:
5986                         use = pkg.use.enabled
5987                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5988                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5989                         missing_iuse = []
5990                         for x in atom.use.required:
5991                                 if iuse_re.match(x) is None:
5992                                         missing_iuse.append(x)
5993                         mreasons = []
5994                         if missing_iuse:
5995                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5996                                 missing_iuse_reasons.append((pkg, mreasons))
5997                         else:
5998                                 need_enable = sorted(atom.use.enabled.difference(use))
5999                                 need_disable = sorted(atom.use.disabled.intersection(use))
6000                                 if need_enable or need_disable:
6001                                         changes = []
6002                                         changes.extend(colorize("red", "+" + x) \
6003                                                 for x in need_enable)
6004                                         changes.extend(colorize("blue", "-" + x) \
6005                                                 for x in need_disable)
6006                                         mreasons.append("Change USE: %s" % " ".join(changes))
6007                                         missing_use_reasons.append((pkg, mreasons))
6008
6009                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6010                         in missing_use_reasons if pkg not in masked_pkg_instances]
6011
6012                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6013                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6014
6015                 show_missing_use = False
6016                 if unmasked_use_reasons:
6017                         # Only show the latest version.
6018                         show_missing_use = unmasked_use_reasons[:1]
6019                 elif unmasked_iuse_reasons:
6020                         if missing_use_reasons:
6021                                 # All packages with required IUSE are masked,
6022                                 # so display a normal masking message.
6023                                 pass
6024                         else:
6025                                 show_missing_use = unmasked_iuse_reasons
6026
6027                 if show_missing_use:
6028                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6029                         print "!!! One of the following packages is required to complete your request:"
6030                         for pkg, mreasons in show_missing_use:
6031                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6032
6033                 elif masked_packages:
6034                         print "\n!!! " + \
6035                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6036                                 colorize("INFORM", xinfo) + \
6037                                 colorize("BAD", " have been masked.")
6038                         print "!!! One of the following masked packages is required to complete your request:"
6039                         have_eapi_mask = show_masked_packages(masked_packages)
6040                         if have_eapi_mask:
6041                                 print
6042                                 msg = ("The current version of portage supports " + \
6043                                         "EAPI '%s'. You must upgrade to a newer version" + \
6044                                         " of portage before EAPI masked packages can" + \
6045                                         " be installed.") % portage.const.EAPI
6046                                 from textwrap import wrap
6047                                 for line in wrap(msg, 75):
6048                                         print line
6049                         print
6050                         show_mask_docs()
6051                 else:
6052                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6053
6054                 # Show parent nodes and the argument that pulled them in.
6055                 traversed_nodes = set()
6056                 node = myparent
6057                 msg = []
6058                 while node is not None:
6059                         traversed_nodes.add(node)
6060                         msg.append('(dependency required by "%s" [%s])' % \
6061                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6062                         # When traversing to parents, prefer arguments over packages
6063                         # since arguments are root nodes. Never traverse the same
6064                         # package twice, in order to prevent an infinite loop.
6065                         selected_parent = None
6066                         for parent in self.digraph.parent_nodes(node):
6067                                 if isinstance(parent, DependencyArg):
6068                                         msg.append('(dependency required by "%s" [argument])' % \
6069                                                 (colorize('INFORM', str(parent))))
6070                                         selected_parent = None
6071                                         break
6072                                 if parent not in traversed_nodes:
6073                                         selected_parent = parent
6074                         node = selected_parent
6075                 for line in msg:
6076                         print line
6077
6078                 print
6079
6080         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6081                 cache_key = (root, atom, onlydeps)
6082                 ret = self._highest_pkg_cache.get(cache_key)
6083                 if ret is not None:
6084                         pkg, existing = ret
6085                         if pkg and not existing:
6086                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6087                                 if existing and existing == pkg:
6088                                         # Update the cache to reflect that the
6089                                         # package has been added to the graph.
6090                                         ret = pkg, pkg
6091                                         self._highest_pkg_cache[cache_key] = ret
6092                         return ret
6093                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6094                 self._highest_pkg_cache[cache_key] = ret
6095                 pkg, existing = ret
6096                 if pkg is not None:
6097                         settings = pkg.root_config.settings
6098                         if visible(settings, pkg) and not (pkg.installed and \
6099                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6100                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6101                 return ret
6102
6103         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6104                 root_config = self.roots[root]
6105                 pkgsettings = self.pkgsettings[root]
6106                 dbs = self._filtered_trees[root]["dbs"]
6107                 vardb = self.roots[root].trees["vartree"].dbapi
6108                 portdb = self.roots[root].trees["porttree"].dbapi
6109                 # List of acceptable packages, ordered by type preference.
6110                 matched_packages = []
6111                 highest_version = None
6112                 if not isinstance(atom, portage.dep.Atom):
6113                         atom = portage.dep.Atom(atom)
6114                 atom_cp = atom.cp
6115                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6116                 existing_node = None
6117                 myeb = None
6118                 usepkgonly = "--usepkgonly" in self.myopts
6119                 empty = "empty" in self.myparams
6120                 selective = "selective" in self.myparams
6121                 reinstall = False
6122                 noreplace = "--noreplace" in self.myopts
6123                 # Behavior of the "selective" parameter depends on
6124                 # whether or not a package matches an argument atom.
6125                 # If an installed package provides an old-style
6126                 # virtual that is no longer provided by an available
6127                 # package, the installed package may match an argument
6128                 # atom even though none of the available packages do.
6129                 # Therefore, "selective" logic does not consider
6130                 # whether or not an installed package matches an
6131                 # argument atom. It only considers whether or not
6132                 # available packages match argument atoms, which is
6133                 # represented by the found_available_arg flag.
6134                 found_available_arg = False
6135                 for find_existing_node in True, False:
6136                         if existing_node:
6137                                 break
6138                         for db, pkg_type, built, installed, db_keys in dbs:
6139                                 if existing_node:
6140                                         break
6141                                 if installed and not find_existing_node:
6142                                         want_reinstall = reinstall or empty or \
6143                                                 (found_available_arg and not selective)
6144                                         if want_reinstall and matched_packages:
6145                                                 continue
6146                                 if hasattr(db, "xmatch"):
6147                                         cpv_list = db.xmatch("match-all", atom)
6148                                 else:
6149                                         cpv_list = db.match(atom)
6150
6151                                 # USE=multislot can make an installed package appear as if
6152                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6153                                 # won't do any good as long as USE=multislot is enabled since
6154                                 # the newly built package still won't have the expected slot.
6155                                 # Therefore, assume that such SLOT dependencies are already
6156                                 # satisfied rather than forcing a rebuild.
6157                                 if installed and not cpv_list and atom.slot:
6158                                         for cpv in db.match(atom.cp):
6159                                                 slot_available = False
6160                                                 for other_db, other_type, other_built, \
6161                                                         other_installed, other_keys in dbs:
6162                                                         try:
6163                                                                 if atom.slot == \
6164                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6165                                                                         slot_available = True
6166                                                                         break
6167                                                         except KeyError:
6168                                                                 pass
6169                                                 if not slot_available:
6170                                                         continue
6171                                                 inst_pkg = self._pkg(cpv, "installed",
6172                                                         root_config, installed=installed)
6173                                                 # Remove the slot from the atom and verify that
6174                                                 # the package matches the resulting atom.
6175                                                 atom_without_slot = portage.dep.remove_slot(atom)
6176                                                 if atom.use:
6177                                                         atom_without_slot += str(atom.use)
6178                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6179                                                 if portage.match_from_list(
6180                                                         atom_without_slot, [inst_pkg]):
6181                                                         cpv_list = [inst_pkg.cpv]
6182                                                 break
6183
6184                                 if not cpv_list:
6185                                         continue
6186                                 pkg_status = "merge"
6187                                 if installed or onlydeps:
6188                                         pkg_status = "nomerge"
6189                                 # descending order
6190                                 cpv_list.reverse()
6191                                 for cpv in cpv_list:
6192                                         # Make --noreplace take precedence over --newuse.
6193                                         if not installed and noreplace and \
6194                                                 cpv in vardb.match(atom):
6195                                                 # If the installed version is masked, it may
6196                                                 # be necessary to look at lower versions,
6197                                                 # in case there is a visible downgrade.
6198                                                 continue
6199                                         reinstall_for_flags = None
6200                                         cache_key = (pkg_type, root, cpv, pkg_status)
6201                                         calculated_use = True
6202                                         pkg = self._pkg_cache.get(cache_key)
6203                                         if pkg is None:
6204                                                 calculated_use = False
6205                                                 try:
6206                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6207                                                 except KeyError:
6208                                                         continue
6209                                                 pkg = Package(built=built, cpv=cpv,
6210                                                         installed=installed, metadata=metadata,
6211                                                         onlydeps=onlydeps, root_config=root_config,
6212                                                         type_name=pkg_type)
6213                                                 metadata = pkg.metadata
6214                                                 if not built:
6215                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6216                                                 if not built and ("?" in metadata["LICENSE"] or \
6217                                                         "?" in metadata["PROVIDE"]):
6218                                                         # This is avoided whenever possible because
6219                                                         # it's expensive. It only needs to be done here
6220                                                         # if it has an effect on visibility.
6221                                                         pkgsettings.setcpv(pkg)
6222                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6223                                                         calculated_use = True
6224                                                 self._pkg_cache[pkg] = pkg
6225
6226                                         if not installed or (built and matched_packages):
6227                                                 # Only enforce visibility on installed packages
6228                                                 # if there is at least one other visible package
6229                                                 # available. By filtering installed masked packages
6230                                                 # here, packages that have been masked since they
6231                                                 # were installed can be automatically downgraded
6232                                                 # to an unmasked version.
6233                                                 try:
6234                                                         if not visible(pkgsettings, pkg):
6235                                                                 continue
6236                                                 except portage.exception.InvalidDependString:
6237                                                         if not installed:
6238                                                                 continue
6239
6240                                                 # Enable upgrade or downgrade to a version
6241                                                 # with visible KEYWORDS when the installed
6242                                                 # version is masked by KEYWORDS, but never
6243                                                 # reinstall the same exact version only due
6244                                                 # to a KEYWORDS mask.
6245                                                 if built and matched_packages:
6246
6247                                                         different_version = None
6248                                                         for avail_pkg in matched_packages:
6249                                                                 if not portage.dep.cpvequal(
6250                                                                         pkg.cpv, avail_pkg.cpv):
6251                                                                         different_version = avail_pkg
6252                                                                         break
6253                                                         if different_version is not None:
6254
6255                                                                 if installed and \
6256                                                                         pkgsettings._getMissingKeywords(
6257                                                                         pkg.cpv, pkg.metadata):
6258                                                                         continue
6259
6260                                                                 # If the ebuild no longer exists or it's
6261                                                                 # keywords have been dropped, reject built
6262                                                                 # instances (installed or binary).
6263                                                                 # If --usepkgonly is enabled, assume that
6264                                                                 # the ebuild status should be ignored.
6265                                                                 if not usepkgonly:
6266                                                                         try:
6267                                                                                 pkg_eb = self._pkg(
6268                                                                                         pkg.cpv, "ebuild", root_config)
6269                                                                         except portage.exception.PackageNotFound:
6270                                                                                 continue
6271                                                                         else:
6272                                                                                 if not visible(pkgsettings, pkg_eb):
6273                                                                                         continue
6274
6275                                         if not pkg.built and not calculated_use:
6276                                                 # This is avoided whenever possible because
6277                                                 # it's expensive.
6278                                                 pkgsettings.setcpv(pkg)
6279                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6280
6281                                         if pkg.cp != atom.cp:
6282                                                 # A cpv can be returned from dbapi.match() as an
6283                                                 # old-style virtual match even in cases when the
6284                                                 # package does not actually PROVIDE the virtual.
6285                                                 # Filter out any such false matches here.
6286                                                 if not atom_set.findAtomForPackage(pkg):
6287                                                         continue
6288
6289                                         myarg = None
6290                                         if root == self.target_root:
6291                                                 try:
6292                                                         # Ebuild USE must have been calculated prior
6293                                                         # to this point, in case atoms have USE deps.
6294                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6295                                                 except StopIteration:
6296                                                         pass
6297                                                 except portage.exception.InvalidDependString:
6298                                                         if not installed:
6299                                                                 # masked by corruption
6300                                                                 continue
6301                                         if not installed and myarg:
6302                                                 found_available_arg = True
6303
6304                                         if atom.use and not pkg.built:
6305                                                 use = pkg.use.enabled
6306                                                 if atom.use.enabled.difference(use):
6307                                                         continue
6308                                                 if atom.use.disabled.intersection(use):
6309                                                         continue
6310                                         if pkg.cp == atom_cp:
6311                                                 if highest_version is None:
6312                                                         highest_version = pkg
6313                                                 elif pkg > highest_version:
6314                                                         highest_version = pkg
6315                                         # At this point, we've found the highest visible
6316                                         # match from the current repo. Any lower versions
6317                                         # from this repo are ignored, so this so the loop
6318                                         # will always end with a break statement below
6319                                         # this point.
6320                                         if find_existing_node:
6321                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6322                                                 if not e_pkg:
6323                                                         break
6324                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6325                                                         if highest_version and \
6326                                                                 e_pkg.cp == atom_cp and \
6327                                                                 e_pkg < highest_version and \
6328                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6329                                                                 # There is a higher version available in a
6330                                                                 # different slot, so this existing node is
6331                                                                 # irrelevant.
6332                                                                 pass
6333                                                         else:
6334                                                                 matched_packages.append(e_pkg)
6335                                                                 existing_node = e_pkg
6336                                                 break
6337                                         # Compare built package to current config and
6338                                         # reject the built package if necessary.
6339                                         if built and not installed and \
6340                                                 ("--newuse" in self.myopts or \
6341                                                 "--reinstall" in self.myopts):
6342                                                 iuses = pkg.iuse.all
6343                                                 old_use = pkg.use.enabled
6344                                                 if myeb:
6345                                                         pkgsettings.setcpv(myeb)
6346                                                 else:
6347                                                         pkgsettings.setcpv(pkg)
6348                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6349                                                 forced_flags = set()
6350                                                 forced_flags.update(pkgsettings.useforce)
6351                                                 forced_flags.update(pkgsettings.usemask)
6352                                                 cur_iuse = iuses
6353                                                 if myeb and not usepkgonly:
6354                                                         cur_iuse = myeb.iuse.all
6355                                                 if self._reinstall_for_flags(forced_flags,
6356                                                         old_use, iuses,
6357                                                         now_use, cur_iuse):
6358                                                         break
6359                                         # Compare current config to installed package
6360                                         # and do not reinstall if possible.
6361                                         if not installed and \
6362                                                 ("--newuse" in self.myopts or \
6363                                                 "--reinstall" in self.myopts) and \
6364                                                 cpv in vardb.match(atom):
6365                                                 pkgsettings.setcpv(pkg)
6366                                                 forced_flags = set()
6367                                                 forced_flags.update(pkgsettings.useforce)
6368                                                 forced_flags.update(pkgsettings.usemask)
6369                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6370                                                 old_iuse = set(filter_iuse_defaults(
6371                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6372                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6373                                                 cur_iuse = pkg.iuse.all
6374                                                 reinstall_for_flags = \
6375                                                         self._reinstall_for_flags(
6376                                                         forced_flags, old_use, old_iuse,
6377                                                         cur_use, cur_iuse)
6378                                                 if reinstall_for_flags:
6379                                                         reinstall = True
6380                                         if not built:
6381                                                 myeb = pkg
6382                                         matched_packages.append(pkg)
6383                                         if reinstall_for_flags:
6384                                                 self._reinstall_nodes[pkg] = \
6385                                                         reinstall_for_flags
6386                                         break
6387
6388                 if not matched_packages:
6389                         return None, None
6390
6391                 if "--debug" in self.myopts:
6392                         for pkg in matched_packages:
6393                                 portage.writemsg("%s %s\n" % \
6394                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6395
6396                 # Filter out any old-style virtual matches if they are
6397                 # mixed with new-style virtual matches.
6398                 cp = portage.dep_getkey(atom)
6399                 if len(matched_packages) > 1 and \
6400                         "virtual" == portage.catsplit(cp)[0]:
6401                         for pkg in matched_packages:
6402                                 if pkg.cp != cp:
6403                                         continue
6404                                 # Got a new-style virtual, so filter
6405                                 # out any old-style virtuals.
6406                                 matched_packages = [pkg for pkg in matched_packages \
6407                                         if pkg.cp == cp]
6408                                 break
6409
6410                 if len(matched_packages) > 1:
6411                         bestmatch = portage.best(
6412                                 [pkg.cpv for pkg in matched_packages])
6413                         matched_packages = [pkg for pkg in matched_packages \
6414                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6415
6416                 # ordered by type preference ("ebuild" type is the last resort)
6417                 return  matched_packages[-1], existing_node
6418
6419         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6420                 """
6421                 Select packages that have already been added to the graph or
6422                 those that are installed and have not been scheduled for
6423                 replacement.
6424                 """
6425                 graph_db = self._graph_trees[root]["porttree"].dbapi
6426                 matches = graph_db.match_pkgs(atom)
6427                 if not matches:
6428                         return None, None
6429                 pkg = matches[-1] # highest match
6430                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6431                 return pkg, in_graph
6432
6433         def _complete_graph(self):
6434                 """
6435                 Add any deep dependencies of required sets (args, system, world) that
6436                 have not been pulled into the graph yet. This ensures that the graph
6437                 is consistent such that initially satisfied deep dependencies are not
6438                 broken in the new graph. Initially unsatisfied dependencies are
6439                 irrelevant since we only want to avoid breaking dependencies that are
6440                 intially satisfied.
6441
6442                 Since this method can consume enough time to disturb users, it is
6443                 currently only enabled by the --complete-graph option.
6444                 """
6445                 if "--buildpkgonly" in self.myopts or \
6446                         "recurse" not in self.myparams:
6447                         return 1
6448
6449                 if "complete" not in self.myparams:
6450                         # Skip this to avoid consuming enough time to disturb users.
6451                         return 1
6452
6453                 # Put the depgraph into a mode that causes it to only
6454                 # select packages that have already been added to the
6455                 # graph or those that are installed and have not been
6456                 # scheduled for replacement. Also, toggle the "deep"
6457                 # parameter so that all dependencies are traversed and
6458                 # accounted for.
6459                 self._select_atoms = self._select_atoms_from_graph
6460                 self._select_package = self._select_pkg_from_graph
6461                 already_deep = "deep" in self.myparams
6462                 if not already_deep:
6463                         self.myparams.add("deep")
6464
6465                 for root in self.roots:
6466                         required_set_names = self._required_set_names.copy()
6467                         if root == self.target_root and \
6468                                 (already_deep or "empty" in self.myparams):
6469                                 required_set_names.difference_update(self._sets)
6470                         if not required_set_names and not self._ignored_deps:
6471                                 continue
6472                         root_config = self.roots[root]
6473                         setconfig = root_config.setconfig
6474                         args = []
6475                         # Reuse existing SetArg instances when available.
6476                         for arg in self.digraph.root_nodes():
6477                                 if not isinstance(arg, SetArg):
6478                                         continue
6479                                 if arg.root_config != root_config:
6480                                         continue
6481                                 if arg.name in required_set_names:
6482                                         args.append(arg)
6483                                         required_set_names.remove(arg.name)
6484                         # Create new SetArg instances only when necessary.
6485                         for s in required_set_names:
6486                                 expanded_set = InternalPackageSet(
6487                                         initial_atoms=setconfig.getSetAtoms(s))
6488                                 atom = SETPREFIX + s
6489                                 args.append(SetArg(arg=atom, set=expanded_set,
6490                                         root_config=root_config))
6491                         vardb = root_config.trees["vartree"].dbapi
6492                         for arg in args:
6493                                 for atom in arg.set:
6494                                         self._dep_stack.append(
6495                                                 Dependency(atom=atom, root=root, parent=arg))
6496                         if self._ignored_deps:
6497                                 self._dep_stack.extend(self._ignored_deps)
6498                                 self._ignored_deps = []
6499                         if not self._create_graph(allow_unsatisfied=True):
6500                                 return 0
6501                         # Check the unsatisfied deps to see if any initially satisfied deps
6502                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6503                         # deps are irrelevant since we only want to avoid breaking deps
6504                         # that are initially satisfied.
6505                         while self._unsatisfied_deps:
6506                                 dep = self._unsatisfied_deps.pop()
6507                                 matches = vardb.match_pkgs(dep.atom)
6508                                 if not matches:
6509                                         self._initially_unsatisfied_deps.append(dep)
6510                                         continue
6511                                 # An scheduled installation broke a deep dependency.
6512                                 # Add the installed package to the graph so that it
6513                                 # will be appropriately reported as a slot collision
6514                                 # (possibly solvable via backtracking).
6515                                 pkg = matches[-1] # highest match
6516                                 if not self._add_pkg(pkg, dep):
6517                                         return 0
6518                                 if not self._create_graph(allow_unsatisfied=True):
6519                                         return 0
6520                 return 1
6521
6522         def _pkg(self, cpv, type_name, root_config, installed=False):
6523                 """
6524                 Get a package instance from the cache, or create a new
6525                 one if necessary. Raises KeyError from aux_get if it
6526                 failures for some reason (package does not exist or is
6527                 corrupt).
6528                 """
6529                 operation = "merge"
6530                 if installed:
6531                         operation = "nomerge"
6532                 pkg = self._pkg_cache.get(
6533                         (type_name, root_config.root, cpv, operation))
6534                 if pkg is None:
6535                         tree_type = self.pkg_tree_map[type_name]
6536                         db = root_config.trees[tree_type].dbapi
6537                         db_keys = list(self._trees_orig[root_config.root][
6538                                 tree_type].dbapi._aux_cache_keys)
6539                         try:
6540                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6541                         except KeyError:
6542                                 raise portage.exception.PackageNotFound(cpv)
6543                         pkg = Package(cpv=cpv, metadata=metadata,
6544                                 root_config=root_config, installed=installed)
6545                         if type_name == "ebuild":
6546                                 settings = self.pkgsettings[root_config.root]
6547                                 settings.setcpv(pkg)
6548                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6549                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6550                         self._pkg_cache[pkg] = pkg
6551                 return pkg
6552
6553         def validate_blockers(self):
6554                 """Remove any blockers from the digraph that do not match any of the
6555                 packages within the graph.  If necessary, create hard deps to ensure
6556                 correct merge order such that mutually blocking packages are never
6557                 installed simultaneously."""
6558
6559                 if "--buildpkgonly" in self.myopts or \
6560                         "--nodeps" in self.myopts:
6561                         return True
6562
6563                 #if "deep" in self.myparams:
6564                 if True:
6565                         # Pull in blockers from all installed packages that haven't already
6566                         # been pulled into the depgraph.  This is not enabled by default
6567                         # due to the performance penalty that is incurred by all the
6568                         # additional dep_check calls that are required.
6569
6570                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6571                         for myroot in self.trees:
6572                                 vardb = self.trees[myroot]["vartree"].dbapi
6573                                 portdb = self.trees[myroot]["porttree"].dbapi
6574                                 pkgsettings = self.pkgsettings[myroot]
6575                                 final_db = self.mydbapi[myroot]
6576
6577                                 blocker_cache = BlockerCache(myroot, vardb)
6578                                 stale_cache = set(blocker_cache)
6579                                 for pkg in vardb:
6580                                         cpv = pkg.cpv
6581                                         stale_cache.discard(cpv)
6582                                         pkg_in_graph = self.digraph.contains(pkg)
6583
6584                                         # Check for masked installed packages. Only warn about
6585                                         # packages that are in the graph in order to avoid warning
6586                                         # about those that will be automatically uninstalled during
6587                                         # the merge process or by --depclean.
6588                                         if pkg in final_db:
6589                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6590                                                         self._masked_installed.add(pkg)
6591
6592                                         blocker_atoms = None
6593                                         blockers = None
6594                                         if pkg_in_graph:
6595                                                 blockers = []
6596                                                 try:
6597                                                         blockers.extend(
6598                                                                 self._blocker_parents.child_nodes(pkg))
6599                                                 except KeyError:
6600                                                         pass
6601                                                 try:
6602                                                         blockers.extend(
6603                                                                 self._irrelevant_blockers.child_nodes(pkg))
6604                                                 except KeyError:
6605                                                         pass
6606                                         if blockers is not None:
6607                                                 blockers = set(str(blocker.atom) \
6608                                                         for blocker in blockers)
6609
6610                                         # If this node has any blockers, create a "nomerge"
6611                                         # node for it so that they can be enforced.
6612                                         self.spinner.update()
6613                                         blocker_data = blocker_cache.get(cpv)
6614                                         if blocker_data is not None and \
6615                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6616                                                 blocker_data = None
6617
6618                                         # If blocker data from the graph is available, use
6619                                         # it to validate the cache and update the cache if
6620                                         # it seems invalid.
6621                                         if blocker_data is not None and \
6622                                                 blockers is not None:
6623                                                 if not blockers.symmetric_difference(
6624                                                         blocker_data.atoms):
6625                                                         continue
6626                                                 blocker_data = None
6627
6628                                         if blocker_data is None and \
6629                                                 blockers is not None:
6630                                                 # Re-use the blockers from the graph.
6631                                                 blocker_atoms = sorted(blockers)
6632                                                 counter = long(pkg.metadata["COUNTER"])
6633                                                 blocker_data = \
6634                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6635                                                 blocker_cache[pkg.cpv] = blocker_data
6636                                                 continue
6637
6638                                         if blocker_data:
6639                                                 blocker_atoms = blocker_data.atoms
6640                                         else:
6641                                                 # Use aux_get() to trigger FakeVartree global
6642                                                 # updates on *DEPEND when appropriate.
6643                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6644                                                 # It is crucial to pass in final_db here in order to
6645                                                 # optimize dep_check calls by eliminating atoms via
6646                                                 # dep_wordreduce and dep_eval calls.
6647                                                 try:
6648                                                         portage.dep._dep_check_strict = False
6649                                                         try:
6650                                                                 success, atoms = portage.dep_check(depstr,
6651                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6652                                                                         trees=self._graph_trees, myroot=myroot)
6653                                                         except Exception, e:
6654                                                                 if isinstance(e, SystemExit):
6655                                                                         raise
6656                                                                 # This is helpful, for example, if a ValueError
6657                                                                 # is thrown from cpv_expand due to multiple
6658                                                                 # matches (this can happen if an atom lacks a
6659                                                                 # category).
6660                                                                 show_invalid_depstring_notice(
6661                                                                         pkg, depstr, str(e))
6662                                                                 del e
6663                                                                 raise
6664                                                 finally:
6665                                                         portage.dep._dep_check_strict = True
6666                                                 if not success:
6667                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6668                                                         if replacement_pkg and \
6669                                                                 replacement_pkg[0].operation == "merge":
6670                                                                 # This package is being replaced anyway, so
6671                                                                 # ignore invalid dependencies so as not to
6672                                                                 # annoy the user too much (otherwise they'd be
6673                                                                 # forced to manually unmerge it first).
6674                                                                 continue
6675                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6676                                                         return False
6677                                                 blocker_atoms = [myatom for myatom in atoms \
6678                                                         if myatom.startswith("!")]
6679                                                 blocker_atoms.sort()
6680                                                 counter = long(pkg.metadata["COUNTER"])
6681                                                 blocker_cache[cpv] = \
6682                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6683                                         if blocker_atoms:
6684                                                 try:
6685                                                         for atom in blocker_atoms:
6686                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6687                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6688                                                                 self._blocker_parents.add(blocker, pkg)
6689                                                 except portage.exception.InvalidAtom, e:
6690                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6691                                                         show_invalid_depstring_notice(
6692                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6693                                                         return False
6694                                 for cpv in stale_cache:
6695                                         del blocker_cache[cpv]
6696                                 blocker_cache.flush()
6697                                 del blocker_cache
6698
6699                 # Discard any "uninstall" tasks scheduled by previous calls
6700                 # to this method, since those tasks may not make sense given
6701                 # the current graph state.
6702                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6703                 if previous_uninstall_tasks:
6704                         self._blocker_uninstalls = digraph()
6705                         self.digraph.difference_update(previous_uninstall_tasks)
6706
6707                 for blocker in self._blocker_parents.leaf_nodes():
6708                         self.spinner.update()
6709                         root_config = self.roots[blocker.root]
6710                         virtuals = root_config.settings.getvirtuals()
6711                         myroot = blocker.root
6712                         initial_db = self.trees[myroot]["vartree"].dbapi
6713                         final_db = self.mydbapi[myroot]
6714                         
6715                         provider_virtual = False
6716                         if blocker.cp in virtuals and \
6717                                 not self._have_new_virt(blocker.root, blocker.cp):
6718                                 provider_virtual = True
6719
6720                         if provider_virtual:
6721                                 atoms = []
6722                                 for provider_entry in virtuals[blocker.cp]:
6723                                         provider_cp = \
6724                                                 portage.dep_getkey(provider_entry)
6725                                         atoms.append(blocker.atom.replace(
6726                                                 blocker.cp, provider_cp))
6727                         else:
6728                                 atoms = [blocker.atom]
6729
6730                         blocked_initial = []
6731                         for atom in atoms:
6732                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6733
6734                         blocked_final = []
6735                         for atom in atoms:
6736                                 blocked_final.extend(final_db.match_pkgs(atom))
6737
6738                         if not blocked_initial and not blocked_final:
6739                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6740                                 self._blocker_parents.remove(blocker)
6741                                 # Discard any parents that don't have any more blockers.
6742                                 for pkg in parent_pkgs:
6743                                         self._irrelevant_blockers.add(blocker, pkg)
6744                                         if not self._blocker_parents.child_nodes(pkg):
6745                                                 self._blocker_parents.remove(pkg)
6746                                 continue
6747                         for parent in self._blocker_parents.parent_nodes(blocker):
6748                                 unresolved_blocks = False
6749                                 depends_on_order = set()
6750                                 for pkg in blocked_initial:
6751                                         if pkg.slot_atom == parent.slot_atom:
6752                                                 # TODO: Support blocks within slots in cases where it
6753                                                 # might make sense.  For example, a new version might
6754                                                 # require that the old version be uninstalled at build
6755                                                 # time.
6756                                                 continue
6757                                         if parent.installed:
6758                                                 # Two currently installed packages conflict with
6759                                                 # eachother. Ignore this case since the damage
6760                                                 # is already done and this would be likely to
6761                                                 # confuse users if displayed like a normal blocker.
6762                                                 continue
6763
6764                                         self._blocked_pkgs.add(pkg, blocker)
6765
6766                                         if parent.operation == "merge":
6767                                                 # Maybe the blocked package can be replaced or simply
6768                                                 # unmerged to resolve this block.
6769                                                 depends_on_order.add((pkg, parent))
6770                                                 continue
6771                                         # None of the above blocker resolutions techniques apply,
6772                                         # so apparently this one is unresolvable.
6773                                         unresolved_blocks = True
6774                                 for pkg in blocked_final:
6775                                         if pkg.slot_atom == parent.slot_atom:
6776                                                 # TODO: Support blocks within slots.
6777                                                 continue
6778                                         if parent.operation == "nomerge" and \
6779                                                 pkg.operation == "nomerge":
6780                                                 # This blocker will be handled the next time that a
6781                                                 # merge of either package is triggered.
6782                                                 continue
6783
6784                                         self._blocked_pkgs.add(pkg, blocker)
6785
6786                                         # Maybe the blocking package can be
6787                                         # unmerged to resolve this block.
6788                                         if parent.operation == "merge" and pkg.installed:
6789                                                 depends_on_order.add((pkg, parent))
6790                                                 continue
6791                                         elif parent.operation == "nomerge":
6792                                                 depends_on_order.add((parent, pkg))
6793                                                 continue
6794                                         # None of the above blocker resolutions techniques apply,
6795                                         # so apparently this one is unresolvable.
6796                                         unresolved_blocks = True
6797
6798                                 # Make sure we don't unmerge any package that have been pulled
6799                                 # into the graph.
6800                                 if not unresolved_blocks and depends_on_order:
6801                                         for inst_pkg, inst_task in depends_on_order:
6802                                                 if self.digraph.contains(inst_pkg) and \
6803                                                         self.digraph.parent_nodes(inst_pkg):
6804                                                         unresolved_blocks = True
6805                                                         break
6806
6807                                 if not unresolved_blocks and depends_on_order:
6808                                         for inst_pkg, inst_task in depends_on_order:
6809                                                 uninst_task = Package(built=inst_pkg.built,
6810                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6811                                                         metadata=inst_pkg.metadata,
6812                                                         operation="uninstall",
6813                                                         root_config=inst_pkg.root_config,
6814                                                         type_name=inst_pkg.type_name)
6815                                                 self._pkg_cache[uninst_task] = uninst_task
6816                                                 # Enforce correct merge order with a hard dep.
6817                                                 self.digraph.addnode(uninst_task, inst_task,
6818                                                         priority=BlockerDepPriority.instance)
6819                                                 # Count references to this blocker so that it can be
6820                                                 # invalidated after nodes referencing it have been
6821                                                 # merged.
6822                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6823                                 if not unresolved_blocks and not depends_on_order:
6824                                         self._irrelevant_blockers.add(blocker, parent)
6825                                         self._blocker_parents.remove_edge(blocker, parent)
6826                                         if not self._blocker_parents.parent_nodes(blocker):
6827                                                 self._blocker_parents.remove(blocker)
6828                                         if not self._blocker_parents.child_nodes(parent):
6829                                                 self._blocker_parents.remove(parent)
6830                                 if unresolved_blocks:
6831                                         self._unsolvable_blockers.add(blocker, parent)
6832
6833                 return True
6834
6835         def _accept_blocker_conflicts(self):
6836                 acceptable = False
6837                 for x in ("--buildpkgonly", "--fetchonly",
6838                         "--fetch-all-uri", "--nodeps"):
6839                         if x in self.myopts:
6840                                 acceptable = True
6841                                 break
6842                 return acceptable
6843
6844         def _merge_order_bias(self, mygraph):
6845                 """
6846                 For optimal leaf node selection, promote deep system runtime deps and
6847                 order nodes from highest to lowest overall reference count.
6848                 """
6849
6850                 node_info = {}
6851                 for node in mygraph.order:
6852                         node_info[node] = len(mygraph.parent_nodes(node))
6853                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6854
6855                 def cmp_merge_preference(node1, node2):
6856
6857                         if node1.operation == 'uninstall':
6858                                 if node2.operation == 'uninstall':
6859                                         return 0
6860                                 return 1
6861
6862                         if node2.operation == 'uninstall':
6863                                 if node1.operation == 'uninstall':
6864                                         return 0
6865                                 return -1
6866
6867                         node1_sys = node1 in deep_system_deps
6868                         node2_sys = node2 in deep_system_deps
6869                         if node1_sys != node2_sys:
6870                                 if node1_sys:
6871                                         return -1
6872                                 return 1
6873
6874                         return node_info[node2] - node_info[node1]
6875
6876                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6877
6878         def altlist(self, reversed=False):
6879
6880                 while self._serialized_tasks_cache is None:
6881                         self._resolve_conflicts()
6882                         try:
6883                                 self._serialized_tasks_cache, self._scheduler_graph = \
6884                                         self._serialize_tasks()
6885                         except self._serialize_tasks_retry:
6886                                 pass
6887
6888                 retlist = self._serialized_tasks_cache[:]
6889                 if reversed:
6890                         retlist.reverse()
6891                 return retlist
6892
6893         def schedulerGraph(self):
6894                 """
6895                 The scheduler graph is identical to the normal one except that
6896                 uninstall edges are reversed in specific cases that require
6897                 conflicting packages to be temporarily installed simultaneously.
6898                 This is intended for use by the Scheduler in it's parallelization
6899                 logic. It ensures that temporary simultaneous installation of
6900                 conflicting packages is avoided when appropriate (especially for
6901                 !!atom blockers), but allowed in specific cases that require it.
6902
6903                 Note that this method calls break_refs() which alters the state of
6904                 internal Package instances such that this depgraph instance should
6905                 not be used to perform any more calculations.
6906                 """
6907                 if self._scheduler_graph is None:
6908                         self.altlist()
6909                 self.break_refs(self._scheduler_graph.order)
6910                 return self._scheduler_graph
6911
6912         def break_refs(self, nodes):
6913                 """
6914                 Take a mergelist like that returned from self.altlist() and
6915                 break any references that lead back to the depgraph. This is
6916                 useful if you want to hold references to packages without
6917                 also holding the depgraph on the heap.
6918                 """
6919                 for node in nodes:
6920                         if hasattr(node, "root_config"):
6921                                 # The FakeVartree references the _package_cache which
6922                                 # references the depgraph. So that Package instances don't
6923                                 # hold the depgraph and FakeVartree on the heap, replace
6924                                 # the RootConfig that references the FakeVartree with the
6925                                 # original RootConfig instance which references the actual
6926                                 # vartree.
6927                                 node.root_config = \
6928                                         self._trees_orig[node.root_config.root]["root_config"]
6929
6930         def _resolve_conflicts(self):
6931                 if not self._complete_graph():
6932                         raise self._unknown_internal_error()
6933
6934                 if not self.validate_blockers():
6935                         raise self._unknown_internal_error()
6936
6937                 if self._slot_collision_info:
6938                         self._process_slot_conflicts()
6939
6940         def _serialize_tasks(self):
6941
6942                 if "--debug" in self.myopts:
6943                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6944                         self.digraph.debug_print()
6945                         writemsg("\n", noiselevel=-1)
6946
6947                 scheduler_graph = self.digraph.copy()
6948                 mygraph=self.digraph.copy()
6949                 # Prune "nomerge" root nodes if nothing depends on them, since
6950                 # otherwise they slow down merge order calculation. Don't remove
6951                 # non-root nodes since they help optimize merge order in some cases
6952                 # such as revdep-rebuild.
6953                 removed_nodes = set()
6954                 while True:
6955                         for node in mygraph.root_nodes():
6956                                 if not isinstance(node, Package) or \
6957                                         node.installed or node.onlydeps:
6958                                         removed_nodes.add(node)
6959                         if removed_nodes:
6960                                 self.spinner.update()
6961                                 mygraph.difference_update(removed_nodes)
6962                         if not removed_nodes:
6963                                 break
6964                         removed_nodes.clear()
6965                 self._merge_order_bias(mygraph)
6966                 def cmp_circular_bias(n1, n2):
6967                         """
6968                         RDEPEND is stronger than PDEPEND and this function
6969                         measures such a strength bias within a circular
6970                         dependency relationship.
6971                         """
6972                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6973                                 ignore_priority=priority_range.ignore_medium_soft)
6974                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6975                                 ignore_priority=priority_range.ignore_medium_soft)
6976                         if n1_n2_medium == n2_n1_medium:
6977                                 return 0
6978                         elif n1_n2_medium:
6979                                 return 1
6980                         return -1
6981                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6982                 retlist=[]
6983                 # Contains uninstall tasks that have been scheduled to
6984                 # occur after overlapping blockers have been installed.
6985                 scheduled_uninstalls = set()
6986                 # Contains any Uninstall tasks that have been ignored
6987                 # in order to avoid the circular deps code path. These
6988                 # correspond to blocker conflicts that could not be
6989                 # resolved.
6990                 ignored_uninstall_tasks = set()
6991                 have_uninstall_task = False
6992                 complete = "complete" in self.myparams
6993                 asap_nodes = []
6994
6995                 def get_nodes(**kwargs):
6996                         """
6997                         Returns leaf nodes excluding Uninstall instances
6998                         since those should be executed as late as possible.
6999                         """
7000                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7001                                 if isinstance(node, Package) and \
7002                                         (node.operation != "uninstall" or \
7003                                         node in scheduled_uninstalls)]
7004
7005                 # sys-apps/portage needs special treatment if ROOT="/"
7006                 running_root = self._running_root.root
7007                 from portage.const import PORTAGE_PACKAGE_ATOM
7008                 runtime_deps = InternalPackageSet(
7009                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7010                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7011                         PORTAGE_PACKAGE_ATOM)
7012                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7013                         PORTAGE_PACKAGE_ATOM)
7014
7015                 if running_portage:
7016                         running_portage = running_portage[0]
7017                 else:
7018                         running_portage = None
7019
7020                 if replacement_portage:
7021                         replacement_portage = replacement_portage[0]
7022                 else:
7023                         replacement_portage = None
7024
7025                 if replacement_portage == running_portage:
7026                         replacement_portage = None
7027
7028                 if replacement_portage is not None:
7029                         # update from running_portage to replacement_portage asap
7030                         asap_nodes.append(replacement_portage)
7031
7032                 if running_portage is not None:
7033                         try:
7034                                 portage_rdepend = self._select_atoms_highest_available(
7035                                         running_root, running_portage.metadata["RDEPEND"],
7036                                         myuse=running_portage.use.enabled,
7037                                         parent=running_portage, strict=False)
7038                         except portage.exception.InvalidDependString, e:
7039                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7040                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7041                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7042                                 del e
7043                                 portage_rdepend = []
7044                         runtime_deps.update(atom for atom in portage_rdepend \
7045                                 if not atom.startswith("!"))
7046
7047                 def gather_deps(ignore_priority, mergeable_nodes,
7048                         selected_nodes, node):
7049                         """
7050                         Recursively gather a group of nodes that RDEPEND on
7051                         eachother. This ensures that they are merged as a group
7052                         and get their RDEPENDs satisfied as soon as possible.
7053                         """
7054                         if node in selected_nodes:
7055                                 return True
7056                         if node not in mergeable_nodes:
7057                                 return False
7058                         if node == replacement_portage and \
7059                                 mygraph.child_nodes(node,
7060                                 ignore_priority=priority_range.ignore_medium_soft):
7061                                 # Make sure that portage always has all of it's
7062                                 # RDEPENDs installed first.
7063                                 return False
7064                         selected_nodes.add(node)
7065                         for child in mygraph.child_nodes(node,
7066                                 ignore_priority=ignore_priority):
7067                                 if not gather_deps(ignore_priority,
7068                                         mergeable_nodes, selected_nodes, child):
7069                                         return False
7070                         return True
7071
7072                 def ignore_uninst_or_med(priority):
7073                         if priority is BlockerDepPriority.instance:
7074                                 return True
7075                         return priority_range.ignore_medium(priority)
7076
7077                 def ignore_uninst_or_med_soft(priority):
7078                         if priority is BlockerDepPriority.instance:
7079                                 return True
7080                         return priority_range.ignore_medium_soft(priority)
7081
7082                 tree_mode = "--tree" in self.myopts
7083                 # Tracks whether or not the current iteration should prefer asap_nodes
7084                 # if available.  This is set to False when the previous iteration
7085                 # failed to select any nodes.  It is reset whenever nodes are
7086                 # successfully selected.
7087                 prefer_asap = True
7088
7089                 # Controls whether or not the current iteration should drop edges that
7090                 # are "satisfied" by installed packages, in order to solve circular
7091                 # dependencies. The deep runtime dependencies of installed packages are
7092                 # not checked in this case (bug #199856), so it must be avoided
7093                 # whenever possible.
7094                 drop_satisfied = False
7095
7096                 # State of variables for successive iterations that loosen the
7097                 # criteria for node selection.
7098                 #
7099                 # iteration   prefer_asap   drop_satisfied
7100                 # 1           True          False
7101                 # 2           False         False
7102                 # 3           False         True
7103                 #
7104                 # If no nodes are selected on the last iteration, it is due to
7105                 # unresolved blockers or circular dependencies.
7106
7107                 while not mygraph.empty():
7108                         self.spinner.update()
7109                         selected_nodes = None
7110                         ignore_priority = None
7111                         if drop_satisfied or (prefer_asap and asap_nodes):
7112                                 priority_range = DepPrioritySatisfiedRange
7113                         else:
7114                                 priority_range = DepPriorityNormalRange
7115                         if prefer_asap and asap_nodes:
7116                                 # ASAP nodes are merged before their soft deps. Go ahead and
7117                                 # select root nodes here if necessary, since it's typical for
7118                                 # the parent to have been removed from the graph already.
7119                                 asap_nodes = [node for node in asap_nodes \
7120                                         if mygraph.contains(node)]
7121                                 for node in asap_nodes:
7122                                         if not mygraph.child_nodes(node,
7123                                                 ignore_priority=priority_range.ignore_soft):
7124                                                 selected_nodes = [node]
7125                                                 asap_nodes.remove(node)
7126                                                 break
7127                         if not selected_nodes and \
7128                                 not (prefer_asap and asap_nodes):
7129                                 for i in xrange(priority_range.NONE,
7130                                         priority_range.MEDIUM_SOFT + 1):
7131                                         ignore_priority = priority_range.ignore_priority[i]
7132                                         nodes = get_nodes(ignore_priority=ignore_priority)
7133                                         if nodes:
7134                                                 # If there is a mix of uninstall nodes with other
7135                                                 # types, save the uninstall nodes for later since
7136                                                 # sometimes a merge node will render an uninstall
7137                                                 # node unnecessary (due to occupying the same slot),
7138                                                 # and we want to avoid executing a separate uninstall
7139                                                 # task in that case.
7140                                                 if len(nodes) > 1:
7141                                                         good_uninstalls = []
7142                                                         with_some_uninstalls_excluded = []
7143                                                         for node in nodes:
7144                                                                 if node.operation == "uninstall":
7145                                                                         slot_node = self.mydbapi[node.root
7146                                                                                 ].match_pkgs(node.slot_atom)
7147                                                                         if slot_node and \
7148                                                                                 slot_node[0].operation == "merge":
7149                                                                                 continue
7150                                                                         good_uninstalls.append(node)
7151                                                                 with_some_uninstalls_excluded.append(node)
7152                                                         if good_uninstalls:
7153                                                                 nodes = good_uninstalls
7154                                                         elif with_some_uninstalls_excluded:
7155                                                                 nodes = with_some_uninstalls_excluded
7156                                                         else:
7157                                                                 nodes = nodes
7158
7159                                                 if ignore_priority is None and not tree_mode:
7160                                                         # Greedily pop all of these nodes since no
7161                                                         # relationship has been ignored. This optimization
7162                                                         # destroys --tree output, so it's disabled in tree
7163                                                         # mode.
7164                                                         selected_nodes = nodes
7165                                                 else:
7166                                                         # For optimal merge order:
7167                                                         #  * Only pop one node.
7168                                                         #  * Removing a root node (node without a parent)
7169                                                         #    will not produce a leaf node, so avoid it.
7170                                                         #  * It's normal for a selected uninstall to be a
7171                                                         #    root node, so don't check them for parents.
7172                                                         for node in nodes:
7173                                                                 if node.operation == "uninstall" or \
7174                                                                         mygraph.parent_nodes(node):
7175                                                                         selected_nodes = [node]
7176                                                                         break
7177
7178                                                 if selected_nodes:
7179                                                         break
7180
7181                         if not selected_nodes:
7182                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7183                                 if nodes:
7184                                         mergeable_nodes = set(nodes)
7185                                         if prefer_asap and asap_nodes:
7186                                                 nodes = asap_nodes
7187                                         for i in xrange(priority_range.SOFT,
7188                                                 priority_range.MEDIUM_SOFT + 1):
7189                                                 ignore_priority = priority_range.ignore_priority[i]
7190                                                 for node in nodes:
7191                                                         if not mygraph.parent_nodes(node):
7192                                                                 continue
7193                                                         selected_nodes = set()
7194                                                         if gather_deps(ignore_priority,
7195                                                                 mergeable_nodes, selected_nodes, node):
7196                                                                 break
7197                                                         else:
7198                                                                 selected_nodes = None
7199                                                 if selected_nodes:
7200                                                         break
7201
7202                                         if prefer_asap and asap_nodes and not selected_nodes:
7203                                                 # We failed to find any asap nodes to merge, so ignore
7204                                                 # them for the next iteration.
7205                                                 prefer_asap = False
7206                                                 continue
7207
7208                         if selected_nodes and ignore_priority is not None:
7209                                 # Try to merge ignored medium_soft deps as soon as possible
7210                                 # if they're not satisfied by installed packages.
7211                                 for node in selected_nodes:
7212                                         children = set(mygraph.child_nodes(node))
7213                                         soft = children.difference(
7214                                                 mygraph.child_nodes(node,
7215                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7216                                         medium_soft = children.difference(
7217                                                 mygraph.child_nodes(node,
7218                                                         ignore_priority = \
7219                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7220                                         medium_soft.difference_update(soft)
7221                                         for child in medium_soft:
7222                                                 if child in selected_nodes:
7223                                                         continue
7224                                                 if child in asap_nodes:
7225                                                         continue
7226                                                 asap_nodes.append(child)
7227
7228                         if selected_nodes and len(selected_nodes) > 1:
7229                                 if not isinstance(selected_nodes, list):
7230                                         selected_nodes = list(selected_nodes)
7231                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7232
7233                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7234                                 # An Uninstall task needs to be executed in order to
7235                                 # avoid conflict if possible.
7236
7237                                 if drop_satisfied:
7238                                         priority_range = DepPrioritySatisfiedRange
7239                                 else:
7240                                         priority_range = DepPriorityNormalRange
7241
7242                                 mergeable_nodes = get_nodes(
7243                                         ignore_priority=ignore_uninst_or_med)
7244
7245                                 min_parent_deps = None
7246                                 uninst_task = None
7247                                 for task in myblocker_uninstalls.leaf_nodes():
7248                                         # Do some sanity checks so that system or world packages
7249                                         # don't get uninstalled inappropriately here (only really
7250                                         # necessary when --complete-graph has not been enabled).
7251
7252                                         if task in ignored_uninstall_tasks:
7253                                                 continue
7254
7255                                         if task in scheduled_uninstalls:
7256                                                 # It's been scheduled but it hasn't
7257                                                 # been executed yet due to dependence
7258                                                 # on installation of blocking packages.
7259                                                 continue
7260
7261                                         root_config = self.roots[task.root]
7262                                         inst_pkg = self._pkg_cache[
7263                                                 ("installed", task.root, task.cpv, "nomerge")]
7264
7265                                         if self.digraph.contains(inst_pkg):
7266                                                 continue
7267
7268                                         forbid_overlap = False
7269                                         heuristic_overlap = False
7270                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7271                                                 if blocker.eapi in ("0", "1"):
7272                                                         heuristic_overlap = True
7273                                                 elif blocker.atom.blocker.overlap.forbid:
7274                                                         forbid_overlap = True
7275                                                         break
7276                                         if forbid_overlap and running_root == task.root:
7277                                                 continue
7278
7279                                         if heuristic_overlap and running_root == task.root:
7280                                                 # Never uninstall sys-apps/portage or it's essential
7281                                                 # dependencies, except through replacement.
7282                                                 try:
7283                                                         runtime_dep_atoms = \
7284                                                                 list(runtime_deps.iterAtomsForPackage(task))
7285                                                 except portage.exception.InvalidDependString, e:
7286                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7287                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7288                                                                 (task.root, task.cpv, e), noiselevel=-1)
7289                                                         del e
7290                                                         continue
7291
7292                                                 # Don't uninstall a runtime dep if it appears
7293                                                 # to be the only suitable one installed.
7294                                                 skip = False
7295                                                 vardb = root_config.trees["vartree"].dbapi
7296                                                 for atom in runtime_dep_atoms:
7297                                                         other_version = None
7298                                                         for pkg in vardb.match_pkgs(atom):
7299                                                                 if pkg.cpv == task.cpv and \
7300                                                                         pkg.metadata["COUNTER"] == \
7301                                                                         task.metadata["COUNTER"]:
7302                                                                         continue
7303                                                                 other_version = pkg
7304                                                                 break
7305                                                         if other_version is None:
7306                                                                 skip = True
7307                                                                 break
7308                                                 if skip:
7309                                                         continue
7310
7311                                                 # For packages in the system set, don't take
7312                                                 # any chances. If the conflict can't be resolved
7313                                                 # by a normal replacement operation then abort.
7314                                                 skip = False
7315                                                 try:
7316                                                         for atom in root_config.sets[
7317                                                                 "system"].iterAtomsForPackage(task):
7318                                                                 skip = True
7319                                                                 break
7320                                                 except portage.exception.InvalidDependString, e:
7321                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7322                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7323                                                                 (task.root, task.cpv, e), noiselevel=-1)
7324                                                         del e
7325                                                         skip = True
7326                                                 if skip:
7327                                                         continue
7328
7329                                         # Note that the world check isn't always
7330                                         # necessary since self._complete_graph() will
7331                                         # add all packages from the system and world sets to the
7332                                         # graph. This just allows unresolved conflicts to be
7333                                         # detected as early as possible, which makes it possible
7334                                         # to avoid calling self._complete_graph() when it is
7335                                         # unnecessary due to blockers triggering an abortion.
7336                                         if not complete:
7337                                                 # For packages in the world set, go ahead an uninstall
7338                                                 # when necessary, as long as the atom will be satisfied
7339                                                 # in the final state.
7340                                                 graph_db = self.mydbapi[task.root]
7341                                                 skip = False
7342                                                 try:
7343                                                         for atom in root_config.sets[
7344                                                                 "world"].iterAtomsForPackage(task):
7345                                                                 satisfied = False
7346                                                                 for pkg in graph_db.match_pkgs(atom):
7347                                                                         if pkg == inst_pkg:
7348                                                                                 continue
7349                                                                         satisfied = True
7350                                                                         break
7351                                                                 if not satisfied:
7352                                                                         skip = True
7353                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7354                                                                         break
7355                                                 except portage.exception.InvalidDependString, e:
7356                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7357                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7358                                                                 (task.root, task.cpv, e), noiselevel=-1)
7359                                                         del e
7360                                                         skip = True
7361                                                 if skip:
7362                                                         continue
7363
7364                                         # Check the deps of parent nodes to ensure that
7365                                         # the chosen task produces a leaf node. Maybe
7366                                         # this can be optimized some more to make the
7367                                         # best possible choice, but the current algorithm
7368                                         # is simple and should be near optimal for most
7369                                         # common cases.
7370                                         mergeable_parent = False
7371                                         parent_deps = set()
7372                                         for parent in mygraph.parent_nodes(task):
7373                                                 parent_deps.update(mygraph.child_nodes(parent,
7374                                                         ignore_priority=priority_range.ignore_medium_soft))
7375                                                 if parent in mergeable_nodes and \
7376                                                         gather_deps(ignore_uninst_or_med_soft,
7377                                                         mergeable_nodes, set(), parent):
7378                                                         mergeable_parent = True
7379
7380                                         if not mergeable_parent:
7381                                                 continue
7382
7383                                         parent_deps.remove(task)
7384                                         if min_parent_deps is None or \
7385                                                 len(parent_deps) < min_parent_deps:
7386                                                 min_parent_deps = len(parent_deps)
7387                                                 uninst_task = task
7388
7389                                 if uninst_task is not None:
7390                                         # The uninstall is performed only after blocking
7391                                         # packages have been merged on top of it. File
7392                                         # collisions between blocking packages are detected
7393                                         # and removed from the list of files to be uninstalled.
7394                                         scheduled_uninstalls.add(uninst_task)
7395                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7396
7397                                         # Reverse the parent -> uninstall edges since we want
7398                                         # to do the uninstall after blocking packages have
7399                                         # been merged on top of it.
7400                                         mygraph.remove(uninst_task)
7401                                         for blocked_pkg in parent_nodes:
7402                                                 mygraph.add(blocked_pkg, uninst_task,
7403                                                         priority=BlockerDepPriority.instance)
7404                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7405                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7406                                                         priority=BlockerDepPriority.instance)
7407
7408                                         # Reset the state variables for leaf node selection and
7409                                         # continue trying to select leaf nodes.
7410                                         prefer_asap = True
7411                                         drop_satisfied = False
7412                                         continue
7413
7414                         if not selected_nodes:
7415                                 # Only select root nodes as a last resort. This case should
7416                                 # only trigger when the graph is nearly empty and the only
7417                                 # remaining nodes are isolated (no parents or children). Since
7418                                 # the nodes must be isolated, ignore_priority is not needed.
7419                                 selected_nodes = get_nodes()
7420
7421                         if not selected_nodes and not drop_satisfied:
7422                                 drop_satisfied = True
7423                                 continue
7424
7425                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7426                                 # If possible, drop an uninstall task here in order to avoid
7427                                 # the circular deps code path. The corresponding blocker will
7428                                 # still be counted as an unresolved conflict.
7429                                 uninst_task = None
7430                                 for node in myblocker_uninstalls.leaf_nodes():
7431                                         try:
7432                                                 mygraph.remove(node)
7433                                         except KeyError:
7434                                                 pass
7435                                         else:
7436                                                 uninst_task = node
7437                                                 ignored_uninstall_tasks.add(node)
7438                                                 break
7439
7440                                 if uninst_task is not None:
7441                                         # Reset the state variables for leaf node selection and
7442                                         # continue trying to select leaf nodes.
7443                                         prefer_asap = True
7444                                         drop_satisfied = False
7445                                         continue
7446
7447                         if not selected_nodes:
7448                                 self._circular_deps_for_display = mygraph
7449                                 raise self._unknown_internal_error()
7450
7451                         # At this point, we've succeeded in selecting one or more nodes, so
7452                         # reset state variables for leaf node selection.
7453                         prefer_asap = True
7454                         drop_satisfied = False
7455
7456                         mygraph.difference_update(selected_nodes)
7457
7458                         for node in selected_nodes:
7459                                 if isinstance(node, Package) and \
7460                                         node.operation == "nomerge":
7461                                         continue
7462
7463                                 # Handle interactions between blockers
7464                                 # and uninstallation tasks.
7465                                 solved_blockers = set()
7466                                 uninst_task = None
7467                                 if isinstance(node, Package) and \
7468                                         "uninstall" == node.operation:
7469                                         have_uninstall_task = True
7470                                         uninst_task = node
7471                                 else:
7472                                         vardb = self.trees[node.root]["vartree"].dbapi
7473                                         previous_cpv = vardb.match(node.slot_atom)
7474                                         if previous_cpv:
7475                                                 # The package will be replaced by this one, so remove
7476                                                 # the corresponding Uninstall task if necessary.
7477                                                 previous_cpv = previous_cpv[0]
7478                                                 uninst_task = \
7479                                                         ("installed", node.root, previous_cpv, "uninstall")
7480                                                 try:
7481                                                         mygraph.remove(uninst_task)
7482                                                 except KeyError:
7483                                                         pass
7484
7485                                 if uninst_task is not None and \
7486                                         uninst_task not in ignored_uninstall_tasks and \
7487                                         myblocker_uninstalls.contains(uninst_task):
7488                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7489                                         myblocker_uninstalls.remove(uninst_task)
7490                                         # Discard any blockers that this Uninstall solves.
7491                                         for blocker in blocker_nodes:
7492                                                 if not myblocker_uninstalls.child_nodes(blocker):
7493                                                         myblocker_uninstalls.remove(blocker)
7494                                                         solved_blockers.add(blocker)
7495
7496                                 retlist.append(node)
7497
7498                                 if (isinstance(node, Package) and \
7499                                         "uninstall" == node.operation) or \
7500                                         (uninst_task is not None and \
7501                                         uninst_task in scheduled_uninstalls):
7502                                         # Include satisfied blockers in the merge list
7503                                         # since the user might be interested and also
7504                                         # it serves as an indicator that blocking packages
7505                                         # will be temporarily installed simultaneously.
7506                                         for blocker in solved_blockers:
7507                                                 retlist.append(Blocker(atom=blocker.atom,
7508                                                         root=blocker.root, eapi=blocker.eapi,
7509                                                         satisfied=True))
7510
7511                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7512                 for node in myblocker_uninstalls.root_nodes():
7513                         unsolvable_blockers.add(node)
7514
7515                 for blocker in unsolvable_blockers:
7516                         retlist.append(blocker)
7517
7518                 # If any Uninstall tasks need to be executed in order
7519                 # to avoid a conflict, complete the graph with any
7520                 # dependencies that may have been initially
7521                 # neglected (to ensure that unsafe Uninstall tasks
7522                 # are properly identified and blocked from execution).
7523                 if have_uninstall_task and \
7524                         not complete and \
7525                         not unsolvable_blockers:
7526                         self.myparams.add("complete")
7527                         raise self._serialize_tasks_retry("")
7528
7529                 if unsolvable_blockers and \
7530                         not self._accept_blocker_conflicts():
7531                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7532                         self._serialized_tasks_cache = retlist[:]
7533                         self._scheduler_graph = scheduler_graph
7534                         raise self._unknown_internal_error()
7535
7536                 if self._slot_collision_info and \
7537                         not self._accept_blocker_conflicts():
7538                         self._serialized_tasks_cache = retlist[:]
7539                         self._scheduler_graph = scheduler_graph
7540                         raise self._unknown_internal_error()
7541
7542                 return retlist, scheduler_graph
7543
7544         def _show_circular_deps(self, mygraph):
7545                 # No leaf nodes are available, so we have a circular
7546                 # dependency panic situation.  Reduce the noise level to a
7547                 # minimum via repeated elimination of root nodes since they
7548                 # have no parents and thus can not be part of a cycle.
7549                 while True:
7550                         root_nodes = mygraph.root_nodes(
7551                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7552                         if not root_nodes:
7553                                 break
7554                         mygraph.difference_update(root_nodes)
7555                 # Display the USE flags that are enabled on nodes that are part
7556                 # of dependency cycles in case that helps the user decide to
7557                 # disable some of them.
7558                 display_order = []
7559                 tempgraph = mygraph.copy()
7560                 while not tempgraph.empty():
7561                         nodes = tempgraph.leaf_nodes()
7562                         if not nodes:
7563                                 node = tempgraph.order[0]
7564                         else:
7565                                 node = nodes[0]
7566                         display_order.append(node)
7567                         tempgraph.remove(node)
7568                 display_order.reverse()
7569                 self.myopts.pop("--quiet", None)
7570                 self.myopts.pop("--verbose", None)
7571                 self.myopts["--tree"] = True
7572                 portage.writemsg("\n\n", noiselevel=-1)
7573                 self.display(display_order)
7574                 prefix = colorize("BAD", " * ")
7575                 portage.writemsg("\n", noiselevel=-1)
7576                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7577                         noiselevel=-1)
7578                 portage.writemsg("\n", noiselevel=-1)
7579                 mygraph.debug_print()
7580                 portage.writemsg("\n", noiselevel=-1)
7581                 portage.writemsg(prefix + "Note that circular dependencies " + \
7582                         "can often be avoided by temporarily\n", noiselevel=-1)
7583                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7584                         "optional dependencies.\n", noiselevel=-1)
7585
7586         def _show_merge_list(self):
7587                 if self._serialized_tasks_cache is not None and \
7588                         not (self._displayed_list and \
7589                         (self._displayed_list == self._serialized_tasks_cache or \
7590                         self._displayed_list == \
7591                                 list(reversed(self._serialized_tasks_cache)))):
7592                         display_list = self._serialized_tasks_cache[:]
7593                         if "--tree" in self.myopts:
7594                                 display_list.reverse()
7595                         self.display(display_list)
7596
7597         def _show_unsatisfied_blockers(self, blockers):
7598                 self._show_merge_list()
7599                 msg = "Error: The above package list contains " + \
7600                         "packages which cannot be installed " + \
7601                         "at the same time on the same system."
7602                 prefix = colorize("BAD", " * ")
7603                 from textwrap import wrap
7604                 portage.writemsg("\n", noiselevel=-1)
7605                 for line in wrap(msg, 70):
7606                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7607
7608                 # Display the conflicting packages along with the packages
7609                 # that pulled them in. This is helpful for troubleshooting
7610                 # cases in which blockers don't solve automatically and
7611                 # the reasons are not apparent from the normal merge list
7612                 # display.
7613
7614                 conflict_pkgs = {}
7615                 for blocker in blockers:
7616                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7617                                 self._blocker_parents.parent_nodes(blocker)):
7618                                 parent_atoms = self._parent_atoms.get(pkg)
7619                                 if not parent_atoms:
7620                                         atom = self._blocked_world_pkgs.get(pkg)
7621                                         if atom is not None:
7622                                                 parent_atoms = set([("@world", atom)])
7623                                 if parent_atoms:
7624                                         conflict_pkgs[pkg] = parent_atoms
7625
7626                 if conflict_pkgs:
7627                         # Reduce noise by pruning packages that are only
7628                         # pulled in by other conflict packages.
7629                         pruned_pkgs = set()
7630                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7631                                 relevant_parent = False
7632                                 for parent, atom in parent_atoms:
7633                                         if parent not in conflict_pkgs:
7634                                                 relevant_parent = True
7635                                                 break
7636                                 if not relevant_parent:
7637                                         pruned_pkgs.add(pkg)
7638                         for pkg in pruned_pkgs:
7639                                 del conflict_pkgs[pkg]
7640
7641                 if conflict_pkgs:
7642                         msg = []
7643                         msg.append("\n")
7644                         indent = "  "
7645                         # Max number of parents shown, to avoid flooding the display.
7646                         max_parents = 3
7647                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7648
7649                                 pruned_list = set()
7650
7651                                 # Prefer packages that are not directly involved in a conflict.
7652                                 for parent_atom in parent_atoms:
7653                                         if len(pruned_list) >= max_parents:
7654                                                 break
7655                                         parent, atom = parent_atom
7656                                         if parent not in conflict_pkgs:
7657                                                 pruned_list.add(parent_atom)
7658
7659                                 for parent_atom in parent_atoms:
7660                                         if len(pruned_list) >= max_parents:
7661                                                 break
7662                                         pruned_list.add(parent_atom)
7663
7664                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7665                                 msg.append(indent + "%s pulled in by\n" % pkg)
7666
7667                                 for parent_atom in pruned_list:
7668                                         parent, atom = parent_atom
7669                                         msg.append(2*indent)
7670                                         if isinstance(parent,
7671                                                 (PackageArg, AtomArg)):
7672                                                 # For PackageArg and AtomArg types, it's
7673                                                 # redundant to display the atom attribute.
7674                                                 msg.append(str(parent))
7675                                         else:
7676                                                 # Display the specific atom from SetArg or
7677                                                 # Package types.
7678                                                 msg.append("%s required by %s" % (atom, parent))
7679                                         msg.append("\n")
7680
7681                                 if omitted_parents:
7682                                         msg.append(2*indent)
7683                                         msg.append("(and %d more)\n" % omitted_parents)
7684
7685                                 msg.append("\n")
7686
7687                         sys.stderr.write("".join(msg))
7688                         sys.stderr.flush()
7689
7690                 if "--quiet" not in self.myopts:
7691                         show_blocker_docs_link()
7692
7693         def display(self, mylist, favorites=[], verbosity=None):
7694
7695                 # This is used to prevent display_problems() from
7696                 # redundantly displaying this exact same merge list
7697                 # again via _show_merge_list().
7698                 self._displayed_list = mylist
7699
7700                 if verbosity is None:
7701                         verbosity = ("--quiet" in self.myopts and 1 or \
7702                                 "--verbose" in self.myopts and 3 or 2)
7703                 favorites_set = InternalPackageSet(favorites)
7704                 oneshot = "--oneshot" in self.myopts or \
7705                         "--onlydeps" in self.myopts
7706                 columns = "--columns" in self.myopts
7707                 changelogs=[]
7708                 p=[]
7709                 blockers = []
7710
7711                 counters = PackageCounters()
7712
7713                 if verbosity == 1 and "--verbose" not in self.myopts:
7714                         def create_use_string(*args):
7715                                 return ""
7716                 else:
7717                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7718                                 old_iuse, old_use,
7719                                 is_new, reinst_flags,
7720                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7721                                 alphabetical=("--alphabetical" in self.myopts)):
7722                                 enabled = []
7723                                 if alphabetical:
7724                                         disabled = enabled
7725                                         removed = enabled
7726                                 else:
7727                                         disabled = []
7728                                         removed = []
7729                                 cur_iuse = set(cur_iuse)
7730                                 enabled_flags = cur_iuse.intersection(cur_use)
7731                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7732                                 any_iuse = cur_iuse.union(old_iuse)
7733                                 any_iuse = list(any_iuse)
7734                                 any_iuse.sort()
7735                                 for flag in any_iuse:
7736                                         flag_str = None
7737                                         isEnabled = False
7738                                         reinst_flag = reinst_flags and flag in reinst_flags
7739                                         if flag in enabled_flags:
7740                                                 isEnabled = True
7741                                                 if is_new or flag in old_use and \
7742                                                         (all_flags or reinst_flag):
7743                                                         flag_str = red(flag)
7744                                                 elif flag not in old_iuse:
7745                                                         flag_str = yellow(flag) + "%*"
7746                                                 elif flag not in old_use:
7747                                                         flag_str = green(flag) + "*"
7748                                         elif flag in removed_iuse:
7749                                                 if all_flags or reinst_flag:
7750                                                         flag_str = yellow("-" + flag) + "%"
7751                                                         if flag in old_use:
7752                                                                 flag_str += "*"
7753                                                         flag_str = "(" + flag_str + ")"
7754                                                         removed.append(flag_str)
7755                                                 continue
7756                                         else:
7757                                                 if is_new or flag in old_iuse and \
7758                                                         flag not in old_use and \
7759                                                         (all_flags or reinst_flag):
7760                                                         flag_str = blue("-" + flag)
7761                                                 elif flag not in old_iuse:
7762                                                         flag_str = yellow("-" + flag)
7763                                                         if flag not in iuse_forced:
7764                                                                 flag_str += "%"
7765                                                 elif flag in old_use:
7766                                                         flag_str = green("-" + flag) + "*"
7767                                         if flag_str:
7768                                                 if flag in iuse_forced:
7769                                                         flag_str = "(" + flag_str + ")"
7770                                                 if isEnabled:
7771                                                         enabled.append(flag_str)
7772                                                 else:
7773                                                         disabled.append(flag_str)
7774
7775                                 if alphabetical:
7776                                         ret = " ".join(enabled)
7777                                 else:
7778                                         ret = " ".join(enabled + disabled + removed)
7779                                 if ret:
7780                                         ret = '%s="%s" ' % (name, ret)
7781                                 return ret
7782
7783                 repo_display = RepoDisplay(self.roots)
7784
7785                 tree_nodes = []
7786                 display_list = []
7787                 mygraph = self.digraph.copy()
7788
7789                 # If there are any Uninstall instances, add the corresponding
7790                 # blockers to the digraph (useful for --tree display).
7791
7792                 executed_uninstalls = set(node for node in mylist \
7793                         if isinstance(node, Package) and node.operation == "unmerge")
7794
7795                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7796                         uninstall_parents = \
7797                                 self._blocker_uninstalls.parent_nodes(uninstall)
7798                         if not uninstall_parents:
7799                                 continue
7800
7801                         # Remove the corresponding "nomerge" node and substitute
7802                         # the Uninstall node.
7803                         inst_pkg = self._pkg_cache[
7804                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7805                         try:
7806                                 mygraph.remove(inst_pkg)
7807                         except KeyError:
7808                                 pass
7809
7810                         try:
7811                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7812                         except KeyError:
7813                                 inst_pkg_blockers = []
7814
7815                         # Break the Package -> Uninstall edges.
7816                         mygraph.remove(uninstall)
7817
7818                         # Resolution of a package's blockers
7819                         # depend on it's own uninstallation.
7820                         for blocker in inst_pkg_blockers:
7821                                 mygraph.add(uninstall, blocker)
7822
7823                         # Expand Package -> Uninstall edges into
7824                         # Package -> Blocker -> Uninstall edges.
7825                         for blocker in uninstall_parents:
7826                                 mygraph.add(uninstall, blocker)
7827                                 for parent in self._blocker_parents.parent_nodes(blocker):
7828                                         if parent != inst_pkg:
7829                                                 mygraph.add(blocker, parent)
7830
7831                         # If the uninstall task did not need to be executed because
7832                         # of an upgrade, display Blocker -> Upgrade edges since the
7833                         # corresponding Blocker -> Uninstall edges will not be shown.
7834                         upgrade_node = \
7835                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7836                         if upgrade_node is not None and \
7837                                 uninstall not in executed_uninstalls:
7838                                 for blocker in uninstall_parents:
7839                                         mygraph.add(upgrade_node, blocker)
7840
7841                 unsatisfied_blockers = []
7842                 i = 0
7843                 depth = 0
7844                 shown_edges = set()
7845                 for x in mylist:
7846                         if isinstance(x, Blocker) and not x.satisfied:
7847                                 unsatisfied_blockers.append(x)
7848                                 continue
7849                         graph_key = x
7850                         if "--tree" in self.myopts:
7851                                 depth = len(tree_nodes)
7852                                 while depth and graph_key not in \
7853                                         mygraph.child_nodes(tree_nodes[depth-1]):
7854                                                 depth -= 1
7855                                 if depth:
7856                                         tree_nodes = tree_nodes[:depth]
7857                                         tree_nodes.append(graph_key)
7858                                         display_list.append((x, depth, True))
7859                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7860                                 else:
7861                                         traversed_nodes = set() # prevent endless circles
7862                                         traversed_nodes.add(graph_key)
7863                                         def add_parents(current_node, ordered):
7864                                                 parent_nodes = None
7865                                                 # Do not traverse to parents if this node is an
7866                                                 # an argument or a direct member of a set that has
7867                                                 # been specified as an argument (system or world).
7868                                                 if current_node not in self._set_nodes:
7869                                                         parent_nodes = mygraph.parent_nodes(current_node)
7870                                                 if parent_nodes:
7871                                                         child_nodes = set(mygraph.child_nodes(current_node))
7872                                                         selected_parent = None
7873                                                         # First, try to avoid a direct cycle.
7874                                                         for node in parent_nodes:
7875                                                                 if not isinstance(node, (Blocker, Package)):
7876                                                                         continue
7877                                                                 if node not in traversed_nodes and \
7878                                                                         node not in child_nodes:
7879                                                                         edge = (current_node, node)
7880                                                                         if edge in shown_edges:
7881                                                                                 continue
7882                                                                         selected_parent = node
7883                                                                         break
7884                                                         if not selected_parent:
7885                                                                 # A direct cycle is unavoidable.
7886                                                                 for node in parent_nodes:
7887                                                                         if not isinstance(node, (Blocker, Package)):
7888                                                                                 continue
7889                                                                         if node not in traversed_nodes:
7890                                                                                 edge = (current_node, node)
7891                                                                                 if edge in shown_edges:
7892                                                                                         continue
7893                                                                                 selected_parent = node
7894                                                                                 break
7895                                                         if selected_parent:
7896                                                                 shown_edges.add((current_node, selected_parent))
7897                                                                 traversed_nodes.add(selected_parent)
7898                                                                 add_parents(selected_parent, False)
7899                                                 display_list.append((current_node,
7900                                                         len(tree_nodes), ordered))
7901                                                 tree_nodes.append(current_node)
7902                                         tree_nodes = []
7903                                         add_parents(graph_key, True)
7904                         else:
7905                                 display_list.append((x, depth, True))
7906                 mylist = display_list
7907                 for x in unsatisfied_blockers:
7908                         mylist.append((x, 0, True))
7909
7910                 last_merge_depth = 0
7911                 for i in xrange(len(mylist)-1,-1,-1):
7912                         graph_key, depth, ordered = mylist[i]
7913                         if not ordered and depth == 0 and i > 0 \
7914                                 and graph_key == mylist[i-1][0] and \
7915                                 mylist[i-1][1] == 0:
7916                                 # An ordered node got a consecutive duplicate when the tree was
7917                                 # being filled in.
7918                                 del mylist[i]
7919                                 continue
7920                         if ordered and graph_key[-1] != "nomerge":
7921                                 last_merge_depth = depth
7922                                 continue
7923                         if depth >= last_merge_depth or \
7924                                 i < len(mylist) - 1 and \
7925                                 depth >= mylist[i+1][1]:
7926                                         del mylist[i]
7927
7928                 from portage import flatten
7929                 from portage.dep import use_reduce, paren_reduce
7930                 # files to fetch list - avoids counting a same file twice
7931                 # in size display (verbose mode)
7932                 myfetchlist=[]
7933
7934                 # Use this set to detect when all the "repoadd" strings are "[0]"
7935                 # and disable the entire repo display in this case.
7936                 repoadd_set = set()
7937
7938                 for mylist_index in xrange(len(mylist)):
7939                         x, depth, ordered = mylist[mylist_index]
7940                         pkg_type = x[0]
7941                         myroot = x[1]
7942                         pkg_key = x[2]
7943                         portdb = self.trees[myroot]["porttree"].dbapi
7944                         bindb  = self.trees[myroot]["bintree"].dbapi
7945                         vardb = self.trees[myroot]["vartree"].dbapi
7946                         vartree = self.trees[myroot]["vartree"]
7947                         pkgsettings = self.pkgsettings[myroot]
7948
7949                         fetch=" "
7950                         indent = " " * depth
7951
7952                         if isinstance(x, Blocker):
7953                                 if x.satisfied:
7954                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7955                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7956                                 else:
7957                                         blocker_style = "PKG_BLOCKER"
7958                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7959                                 if ordered:
7960                                         counters.blocks += 1
7961                                         if x.satisfied:
7962                                                 counters.blocks_satisfied += 1
7963                                 resolved = portage.key_expand(
7964                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7965                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7966                                         addl += " " + colorize(blocker_style, resolved)
7967                                 else:
7968                                         addl = "[%s %s] %s%s" % \
7969                                                 (colorize(blocker_style, "blocks"),
7970                                                 addl, indent, colorize(blocker_style, resolved))
7971                                 block_parents = self._blocker_parents.parent_nodes(x)
7972                                 block_parents = set([pnode[2] for pnode in block_parents])
7973                                 block_parents = ", ".join(block_parents)
7974                                 if resolved!=x[2]:
7975                                         addl += colorize(blocker_style,
7976                                                 " (\"%s\" is blocking %s)") % \
7977                                                 (str(x.atom).lstrip("!"), block_parents)
7978                                 else:
7979                                         addl += colorize(blocker_style,
7980                                                 " (is blocking %s)") % block_parents
7981                                 if isinstance(x, Blocker) and x.satisfied:
7982                                         if columns:
7983                                                 continue
7984                                         p.append(addl)
7985                                 else:
7986                                         blockers.append(addl)
7987                         else:
7988                                 pkg_status = x[3]
7989                                 pkg_merge = ordered and pkg_status == "merge"
7990                                 if not pkg_merge and pkg_status == "merge":
7991                                         pkg_status = "nomerge"
7992                                 built = pkg_type != "ebuild"
7993                                 installed = pkg_type == "installed"
7994                                 pkg = x
7995                                 metadata = pkg.metadata
7996                                 ebuild_path = None
7997                                 repo_name = metadata["repository"]
7998                                 if pkg_type == "ebuild":
7999                                         ebuild_path = portdb.findname(pkg_key)
8000                                         if not ebuild_path: # shouldn't happen
8001                                                 raise portage.exception.PackageNotFound(pkg_key)
8002                                         repo_path_real = os.path.dirname(os.path.dirname(
8003                                                 os.path.dirname(ebuild_path)))
8004                                 else:
8005                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8006                                 pkg_use = list(pkg.use.enabled)
8007                                 try:
8008                                         restrict = flatten(use_reduce(paren_reduce(
8009                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8010                                 except portage.exception.InvalidDependString, e:
8011                                         if not pkg.installed:
8012                                                 show_invalid_depstring_notice(x,
8013                                                         pkg.metadata["RESTRICT"], str(e))
8014                                                 del e
8015                                                 return 1
8016                                         restrict = []
8017                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8018                                         "fetch" in restrict:
8019                                         fetch = red("F")
8020                                         if ordered:
8021                                                 counters.restrict_fetch += 1
8022                                         if portdb.fetch_check(pkg_key, pkg_use):
8023                                                 fetch = green("f")
8024                                                 if ordered:
8025                                                         counters.restrict_fetch_satisfied += 1
8026
8027                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8028                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8029                                 myoldbest = []
8030                                 myinslotlist = None
8031                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8032                                 if vardb.cpv_exists(pkg_key):
8033                                         addl="  "+yellow("R")+fetch+"  "
8034                                         if ordered:
8035                                                 if pkg_merge:
8036                                                         counters.reinst += 1
8037                                                 elif pkg_status == "uninstall":
8038                                                         counters.uninst += 1
8039                                 # filter out old-style virtual matches
8040                                 elif installed_versions and \
8041                                         portage.cpv_getkey(installed_versions[0]) == \
8042                                         portage.cpv_getkey(pkg_key):
8043                                         myinslotlist = vardb.match(pkg.slot_atom)
8044                                         # If this is the first install of a new-style virtual, we
8045                                         # need to filter out old-style virtual matches.
8046                                         if myinslotlist and \
8047                                                 portage.cpv_getkey(myinslotlist[0]) != \
8048                                                 portage.cpv_getkey(pkg_key):
8049                                                 myinslotlist = None
8050                                         if myinslotlist:
8051                                                 myoldbest = myinslotlist[:]
8052                                                 addl = "   " + fetch
8053                                                 if not portage.dep.cpvequal(pkg_key,
8054                                                         portage.best([pkg_key] + myoldbest)):
8055                                                         # Downgrade in slot
8056                                                         addl += turquoise("U")+blue("D")
8057                                                         if ordered:
8058                                                                 counters.downgrades += 1
8059                                                 else:
8060                                                         # Update in slot
8061                                                         addl += turquoise("U") + " "
8062                                                         if ordered:
8063                                                                 counters.upgrades += 1
8064                                         else:
8065                                                 # New slot, mark it new.
8066                                                 addl = " " + green("NS") + fetch + "  "
8067                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8068                                                 if ordered:
8069                                                         counters.newslot += 1
8070
8071                                         if "--changelog" in self.myopts:
8072                                                 inst_matches = vardb.match(pkg.slot_atom)
8073                                                 if inst_matches:
8074                                                         changelogs.extend(self.calc_changelog(
8075                                                                 portdb.findname(pkg_key),
8076                                                                 inst_matches[0], pkg_key))
8077                                 else:
8078                                         addl = " " + green("N") + " " + fetch + "  "
8079                                         if ordered:
8080                                                 counters.new += 1
8081
8082                                 verboseadd = ""
8083                                 repoadd = None
8084
8085                                 if True:
8086                                         # USE flag display
8087                                         forced_flags = set()
8088                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8089                                         forced_flags.update(pkgsettings.useforce)
8090                                         forced_flags.update(pkgsettings.usemask)
8091
8092                                         cur_use = [flag for flag in pkg.use.enabled \
8093                                                 if flag in pkg.iuse.all]
8094                                         cur_iuse = sorted(pkg.iuse.all)
8095
8096                                         if myoldbest and myinslotlist:
8097                                                 previous_cpv = myoldbest[0]
8098                                         else:
8099                                                 previous_cpv = pkg.cpv
8100                                         if vardb.cpv_exists(previous_cpv):
8101                                                 old_iuse, old_use = vardb.aux_get(
8102                                                                 previous_cpv, ["IUSE", "USE"])
8103                                                 old_iuse = list(set(
8104                                                         filter_iuse_defaults(old_iuse.split())))
8105                                                 old_iuse.sort()
8106                                                 old_use = old_use.split()
8107                                                 is_new = False
8108                                         else:
8109                                                 old_iuse = []
8110                                                 old_use = []
8111                                                 is_new = True
8112
8113                                         old_use = [flag for flag in old_use if flag in old_iuse]
8114
8115                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8116                                         use_expand.sort()
8117                                         use_expand.reverse()
8118                                         use_expand_hidden = \
8119                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8120
8121                                         def map_to_use_expand(myvals, forcedFlags=False,
8122                                                 removeHidden=True):
8123                                                 ret = {}
8124                                                 forced = {}
8125                                                 for exp in use_expand:
8126                                                         ret[exp] = []
8127                                                         forced[exp] = set()
8128                                                         for val in myvals[:]:
8129                                                                 if val.startswith(exp.lower()+"_"):
8130                                                                         if val in forced_flags:
8131                                                                                 forced[exp].add(val[len(exp)+1:])
8132                                                                         ret[exp].append(val[len(exp)+1:])
8133                                                                         myvals.remove(val)
8134                                                 ret["USE"] = myvals
8135                                                 forced["USE"] = [val for val in myvals \
8136                                                         if val in forced_flags]
8137                                                 if removeHidden:
8138                                                         for exp in use_expand_hidden:
8139                                                                 ret.pop(exp, None)
8140                                                 if forcedFlags:
8141                                                         return ret, forced
8142                                                 return ret
8143
8144                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8145                                         # are the only thing that triggered reinstallation.
8146                                         reinst_flags_map = {}
8147                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8148                                         reinst_expand_map = None
8149                                         if reinstall_for_flags:
8150                                                 reinst_flags_map = map_to_use_expand(
8151                                                         list(reinstall_for_flags), removeHidden=False)
8152                                                 for k in list(reinst_flags_map):
8153                                                         if not reinst_flags_map[k]:
8154                                                                 del reinst_flags_map[k]
8155                                                 if not reinst_flags_map.get("USE"):
8156                                                         reinst_expand_map = reinst_flags_map.copy()
8157                                                         reinst_expand_map.pop("USE", None)
8158                                         if reinst_expand_map and \
8159                                                 not set(reinst_expand_map).difference(
8160                                                 use_expand_hidden):
8161                                                 use_expand_hidden = \
8162                                                         set(use_expand_hidden).difference(
8163                                                         reinst_expand_map)
8164
8165                                         cur_iuse_map, iuse_forced = \
8166                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8167                                         cur_use_map = map_to_use_expand(cur_use)
8168                                         old_iuse_map = map_to_use_expand(old_iuse)
8169                                         old_use_map = map_to_use_expand(old_use)
8170
8171                                         use_expand.sort()
8172                                         use_expand.insert(0, "USE")
8173                                         
8174                                         for key in use_expand:
8175                                                 if key in use_expand_hidden:
8176                                                         continue
8177                                                 verboseadd += create_use_string(key.upper(),
8178                                                         cur_iuse_map[key], iuse_forced[key],
8179                                                         cur_use_map[key], old_iuse_map[key],
8180                                                         old_use_map[key], is_new,
8181                                                         reinst_flags_map.get(key))
8182
8183                                 if verbosity == 3:
8184                                         # size verbose
8185                                         mysize=0
8186                                         if pkg_type == "ebuild" and pkg_merge:
8187                                                 try:
8188                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8189                                                                 useflags=pkg_use, debug=self.edebug)
8190                                                 except portage.exception.InvalidDependString, e:
8191                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8192                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8193                                                         del e
8194                                                         return 1
8195                                                 if myfilesdict is None:
8196                                                         myfilesdict="[empty/missing/bad digest]"
8197                                                 else:
8198                                                         for myfetchfile in myfilesdict:
8199                                                                 if myfetchfile not in myfetchlist:
8200                                                                         mysize+=myfilesdict[myfetchfile]
8201                                                                         myfetchlist.append(myfetchfile)
8202                                                         if ordered:
8203                                                                 counters.totalsize += mysize
8204                                                 verboseadd += format_size(mysize)
8205
8206                                         # overlay verbose
8207                                         # assign index for a previous version in the same slot
8208                                         has_previous = False
8209                                         repo_name_prev = None
8210                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8211                                                 metadata["SLOT"])
8212                                         slot_matches = vardb.match(slot_atom)
8213                                         if slot_matches:
8214                                                 has_previous = True
8215                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8216                                                         ["repository"])[0]
8217
8218                                         # now use the data to generate output
8219                                         if pkg.installed or not has_previous:
8220                                                 repoadd = repo_display.repoStr(repo_path_real)
8221                                         else:
8222                                                 repo_path_prev = None
8223                                                 if repo_name_prev:
8224                                                         repo_path_prev = portdb.getRepositoryPath(
8225                                                                 repo_name_prev)
8226                                                 if repo_path_prev == repo_path_real:
8227                                                         repoadd = repo_display.repoStr(repo_path_real)
8228                                                 else:
8229                                                         repoadd = "%s=>%s" % (
8230                                                                 repo_display.repoStr(repo_path_prev),
8231                                                                 repo_display.repoStr(repo_path_real))
8232                                         if repoadd:
8233                                                 repoadd_set.add(repoadd)
8234
8235                                 xs = [portage.cpv_getkey(pkg_key)] + \
8236                                         list(portage.catpkgsplit(pkg_key)[2:])
8237                                 if xs[2] == "r0":
8238                                         xs[2] = ""
8239                                 else:
8240                                         xs[2] = "-" + xs[2]
8241
8242                                 mywidth = 130
8243                                 if "COLUMNWIDTH" in self.settings:
8244                                         try:
8245                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8246                                         except ValueError, e:
8247                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8248                                                 portage.writemsg(
8249                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8250                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8251                                                 del e
8252                                 oldlp = mywidth - 30
8253                                 newlp = oldlp - 30
8254
8255                                 # Convert myoldbest from a list to a string.
8256                                 if not myoldbest:
8257                                         myoldbest = ""
8258                                 else:
8259                                         for pos, key in enumerate(myoldbest):
8260                                                 key = portage.catpkgsplit(key)[2] + \
8261                                                         "-" + portage.catpkgsplit(key)[3]
8262                                                 if key[-3:] == "-r0":
8263                                                         key = key[:-3]
8264                                                 myoldbest[pos] = key
8265                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8266
8267                                 pkg_cp = xs[0]
8268                                 root_config = self.roots[myroot]
8269                                 system_set = root_config.sets["system"]
8270                                 world_set  = root_config.sets["world"]
8271
8272                                 pkg_system = False
8273                                 pkg_world = False
8274                                 try:
8275                                         pkg_system = system_set.findAtomForPackage(pkg)
8276                                         pkg_world  = world_set.findAtomForPackage(pkg)
8277                                         if not (oneshot or pkg_world) and \
8278                                                 myroot == self.target_root and \
8279                                                 favorites_set.findAtomForPackage(pkg):
8280                                                 # Maybe it will be added to world now.
8281                                                 if create_world_atom(pkg, favorites_set, root_config):
8282                                                         pkg_world = True
8283                                 except portage.exception.InvalidDependString:
8284                                         # This is reported elsewhere if relevant.
8285                                         pass
8286
8287                                 def pkgprint(pkg_str):
8288                                         if pkg_merge:
8289                                                 if pkg_system:
8290                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8291                                                 elif pkg_world:
8292                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8293                                                 else:
8294                                                         return colorize("PKG_MERGE", pkg_str)
8295                                         elif pkg_status == "uninstall":
8296                                                 return colorize("PKG_UNINSTALL", pkg_str)
8297                                         else:
8298                                                 if pkg_system:
8299                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8300                                                 elif pkg_world:
8301                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8302                                                 else:
8303                                                         return colorize("PKG_NOMERGE", pkg_str)
8304
8305                                 try:
8306                                         properties = flatten(use_reduce(paren_reduce(
8307                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8308                                 except portage.exception.InvalidDependString, e:
8309                                         if not pkg.installed:
8310                                                 show_invalid_depstring_notice(pkg,
8311                                                         pkg.metadata["PROPERTIES"], str(e))
8312                                                 del e
8313                                                 return 1
8314                                         properties = []
8315                                 interactive = "interactive" in properties
8316                                 if interactive and pkg.operation == "merge":
8317                                         addl = colorize("WARN", "I") + addl[1:]
8318                                         if ordered:
8319                                                 counters.interactive += 1
8320
8321                                 if x[1]!="/":
8322                                         if myoldbest:
8323                                                 myoldbest +=" "
8324                                         if "--columns" in self.myopts:
8325                                                 if "--quiet" in self.myopts:
8326                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8327                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8328                                                         myprint=myprint+myoldbest
8329                                                         myprint=myprint+darkgreen("to "+x[1])
8330                                                         verboseadd = None
8331                                                 else:
8332                                                         if not pkg_merge:
8333                                                                 myprint = "[%s] %s%s" % \
8334                                                                         (pkgprint(pkg_status.ljust(13)),
8335                                                                         indent, pkgprint(pkg.cp))
8336                                                         else:
8337                                                                 myprint = "[%s %s] %s%s" % \
8338                                                                         (pkgprint(pkg.type_name), addl,
8339                                                                         indent, pkgprint(pkg.cp))
8340                                                         if (newlp-nc_len(myprint)) > 0:
8341                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8342                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8343                                                         if (oldlp-nc_len(myprint)) > 0:
8344                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8345                                                         myprint=myprint+myoldbest
8346                                                         myprint += darkgreen("to " + pkg.root)
8347                                         else:
8348                                                 if not pkg_merge:
8349                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8350                                                 else:
8351                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8352                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8353                                                         myoldbest + darkgreen("to " + myroot)
8354                                 else:
8355                                         if "--columns" in self.myopts:
8356                                                 if "--quiet" in self.myopts:
8357                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8358                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8359                                                         myprint=myprint+myoldbest
8360                                                         verboseadd = None
8361                                                 else:
8362                                                         if not pkg_merge:
8363                                                                 myprint = "[%s] %s%s" % \
8364                                                                         (pkgprint(pkg_status.ljust(13)),
8365                                                                         indent, pkgprint(pkg.cp))
8366                                                         else:
8367                                                                 myprint = "[%s %s] %s%s" % \
8368                                                                         (pkgprint(pkg.type_name), addl,
8369                                                                         indent, pkgprint(pkg.cp))
8370                                                         if (newlp-nc_len(myprint)) > 0:
8371                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8372                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8373                                                         if (oldlp-nc_len(myprint)) > 0:
8374                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8375                                                         myprint += myoldbest
8376                                         else:
8377                                                 if not pkg_merge:
8378                                                         myprint = "[%s] %s%s %s" % \
8379                                                                 (pkgprint(pkg_status.ljust(13)),
8380                                                                 indent, pkgprint(pkg.cpv),
8381                                                                 myoldbest)
8382                                                 else:
8383                                                         myprint = "[%s %s] %s%s %s" % \
8384                                                                 (pkgprint(pkg_type), addl, indent,
8385                                                                 pkgprint(pkg.cpv), myoldbest)
8386
8387                                 if columns and pkg.operation == "uninstall":
8388                                         continue
8389                                 p.append((myprint, verboseadd, repoadd))
8390
8391                                 if "--tree" not in self.myopts and \
8392                                         "--quiet" not in self.myopts and \
8393                                         not self._opts_no_restart.intersection(self.myopts) and \
8394                                         pkg.root == self._running_root.root and \
8395                                         portage.match_from_list(
8396                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8397                                         not vardb.cpv_exists(pkg.cpv) and \
8398                                         "--quiet" not in self.myopts:
8399                                                 if mylist_index < len(mylist) - 1:
8400                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8401                                                         p.append(colorize("WARN", "    then resume the merge."))
8402
8403                 out = sys.stdout
8404                 show_repos = repoadd_set and repoadd_set != set(["0"])
8405
8406                 for x in p:
8407                         if isinstance(x, basestring):
8408                                 out.write("%s\n" % (x,))
8409                                 continue
8410
8411                         myprint, verboseadd, repoadd = x
8412
8413                         if verboseadd:
8414                                 myprint += " " + verboseadd
8415
8416                         if show_repos and repoadd:
8417                                 myprint += " " + teal("[%s]" % repoadd)
8418
8419                         out.write("%s\n" % (myprint,))
8420
8421                 for x in blockers:
8422                         print x
8423
8424                 if verbosity == 3:
8425                         print
8426                         print counters
8427                         if show_repos:
8428                                 sys.stdout.write(str(repo_display))
8429
8430                 if "--changelog" in self.myopts:
8431                         print
8432                         for revision,text in changelogs:
8433                                 print bold('*'+revision)
8434                                 sys.stdout.write(text)
8435
8436                 sys.stdout.flush()
8437                 return os.EX_OK
8438
8439         def display_problems(self):
8440                 """
8441                 Display problems with the dependency graph such as slot collisions.
8442                 This is called internally by display() to show the problems _after_
8443                 the merge list where it is most likely to be seen, but if display()
8444                 is not going to be called then this method should be called explicitly
8445                 to ensure that the user is notified of problems with the graph.
8446
8447                 All output goes to stderr, except for unsatisfied dependencies which
8448                 go to stdout for parsing by programs such as autounmask.
8449                 """
8450
8451                 # Note that show_masked_packages() sends it's output to
8452                 # stdout, and some programs such as autounmask parse the
8453                 # output in cases when emerge bails out. However, when
8454                 # show_masked_packages() is called for installed packages
8455                 # here, the message is a warning that is more appropriate
8456                 # to send to stderr, so temporarily redirect stdout to
8457                 # stderr. TODO: Fix output code so there's a cleaner way
8458                 # to redirect everything to stderr.
8459                 sys.stdout.flush()
8460                 sys.stderr.flush()
8461                 stdout = sys.stdout
8462                 try:
8463                         sys.stdout = sys.stderr
8464                         self._display_problems()
8465                 finally:
8466                         sys.stdout = stdout
8467                         sys.stdout.flush()
8468                         sys.stderr.flush()
8469
8470                 # This goes to stdout for parsing by programs like autounmask.
8471                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8472                         self._show_unsatisfied_dep(*pargs, **kwargs)
8473
8474         def _display_problems(self):
8475                 if self._circular_deps_for_display is not None:
8476                         self._show_circular_deps(
8477                                 self._circular_deps_for_display)
8478
8479                 # The user is only notified of a slot conflict if
8480                 # there are no unresolvable blocker conflicts.
8481                 if self._unsatisfied_blockers_for_display is not None:
8482                         self._show_unsatisfied_blockers(
8483                                 self._unsatisfied_blockers_for_display)
8484                 else:
8485                         self._show_slot_collision_notice()
8486
8487                 # TODO: Add generic support for "set problem" handlers so that
8488                 # the below warnings aren't special cases for world only.
8489
8490                 if self._missing_args:
8491                         world_problems = False
8492                         if "world" in self._sets:
8493                                 # Filter out indirect members of world (from nested sets)
8494                                 # since only direct members of world are desired here.
8495                                 world_set = self.roots[self.target_root].sets["world"]
8496                                 for arg, atom in self._missing_args:
8497                                         if arg.name == "world" and atom in world_set:
8498                                                 world_problems = True
8499                                                 break
8500
8501                         if world_problems:
8502                                 sys.stderr.write("\n!!! Problems have been " + \
8503                                         "detected with your world file\n")
8504                                 sys.stderr.write("!!! Please run " + \
8505                                         green("emaint --check world")+"\n\n")
8506
8507                 if self._missing_args:
8508                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8509                                 " Ebuilds for the following packages are either all\n")
8510                         sys.stderr.write(colorize("BAD", "!!!") + \
8511                                 " masked or don't exist:\n")
8512                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8513                                 self._missing_args) + "\n")
8514
8515                 if self._pprovided_args:
8516                         arg_refs = {}
8517                         for arg, atom in self._pprovided_args:
8518                                 if isinstance(arg, SetArg):
8519                                         parent = arg.name
8520                                         arg_atom = (atom, atom)
8521                                 else:
8522                                         parent = "args"
8523                                         arg_atom = (arg.arg, atom)
8524                                 refs = arg_refs.setdefault(arg_atom, [])
8525                                 if parent not in refs:
8526                                         refs.append(parent)
8527                         msg = []
8528                         msg.append(bad("\nWARNING: "))
8529                         if len(self._pprovided_args) > 1:
8530                                 msg.append("Requested packages will not be " + \
8531                                         "merged because they are listed in\n")
8532                         else:
8533                                 msg.append("A requested package will not be " + \
8534                                         "merged because it is listed in\n")
8535                         msg.append("package.provided:\n\n")
8536                         problems_sets = set()
8537                         for (arg, atom), refs in arg_refs.iteritems():
8538                                 ref_string = ""
8539                                 if refs:
8540                                         problems_sets.update(refs)
8541                                         refs.sort()
8542                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8543                                         ref_string = " pulled in by " + ref_string
8544                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8545                         msg.append("\n")
8546                         if "world" in problems_sets:
8547                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8548                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8549                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8550                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8551                                 msg.append("The best course of action depends on the reason that an offending\n")
8552                                 msg.append("package.provided entry exists.\n\n")
8553                         sys.stderr.write("".join(msg))
8554
8555                 masked_packages = []
8556                 for pkg in self._masked_installed:
8557                         root_config = pkg.root_config
8558                         pkgsettings = self.pkgsettings[pkg.root]
8559                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8560                         masked_packages.append((root_config, pkgsettings,
8561                                 pkg.cpv, pkg.metadata, mreasons))
8562                 if masked_packages:
8563                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8564                                 " The following installed packages are masked:\n")
8565                         show_masked_packages(masked_packages)
8566                         show_mask_docs()
8567                         print
8568
8569         def calc_changelog(self,ebuildpath,current,next):
8570                 if ebuildpath == None or not os.path.exists(ebuildpath):
8571                         return []
8572                 current = '-'.join(portage.catpkgsplit(current)[1:])
8573                 if current.endswith('-r0'):
8574                         current = current[:-3]
8575                 next = '-'.join(portage.catpkgsplit(next)[1:])
8576                 if next.endswith('-r0'):
8577                         next = next[:-3]
8578                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8579                 try:
8580                         changelog = open(changelogpath).read()
8581                 except SystemExit, e:
8582                         raise # Needed else can't exit
8583                 except:
8584                         return []
8585                 divisions = self.find_changelog_tags(changelog)
8586                 #print 'XX from',current,'to',next
8587                 #for div,text in divisions: print 'XX',div
8588                 # skip entries for all revisions above the one we are about to emerge
8589                 for i in range(len(divisions)):
8590                         if divisions[i][0]==next:
8591                                 divisions = divisions[i:]
8592                                 break
8593                 # find out how many entries we are going to display
8594                 for i in range(len(divisions)):
8595                         if divisions[i][0]==current:
8596                                 divisions = divisions[:i]
8597                                 break
8598                 else:
8599                     # couldnt find the current revision in the list. display nothing
8600                         return []
8601                 return divisions
8602
8603         def find_changelog_tags(self,changelog):
8604                 divs = []
8605                 release = None
8606                 while 1:
8607                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8608                         if match is None:
8609                                 if release is not None:
8610                                         divs.append((release,changelog))
8611                                 return divs
8612                         if release is not None:
8613                                 divs.append((release,changelog[:match.start()]))
8614                         changelog = changelog[match.end():]
8615                         release = match.group(1)
8616                         if release.endswith('.ebuild'):
8617                                 release = release[:-7]
8618                         if release.endswith('-r0'):
8619                                 release = release[:-3]
8620
8621         def saveNomergeFavorites(self):
8622                 """Find atoms in favorites that are not in the mergelist and add them
8623                 to the world file if necessary."""
8624                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8625                         "--oneshot", "--onlydeps", "--pretend"):
8626                         if x in self.myopts:
8627                                 return
8628                 root_config = self.roots[self.target_root]
8629                 world_set = root_config.sets["world"]
8630
8631                 world_locked = False
8632                 if hasattr(world_set, "lock"):
8633                         world_set.lock()
8634                         world_locked = True
8635
8636                 if hasattr(world_set, "load"):
8637                         world_set.load() # maybe it's changed on disk
8638
8639                 args_set = self._sets["args"]
8640                 portdb = self.trees[self.target_root]["porttree"].dbapi
8641                 added_favorites = set()
8642                 for x in self._set_nodes:
8643                         pkg_type, root, pkg_key, pkg_status = x
8644                         if pkg_status != "nomerge":
8645                                 continue
8646
8647                         try:
8648                                 myfavkey = create_world_atom(x, args_set, root_config)
8649                                 if myfavkey:
8650                                         if myfavkey in added_favorites:
8651                                                 continue
8652                                         added_favorites.add(myfavkey)
8653                         except portage.exception.InvalidDependString, e:
8654                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8655                                         (pkg_key, str(e)), noiselevel=-1)
8656                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8657                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8658                                 del e
8659                 all_added = []
8660                 for k in self._sets:
8661                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8662                                 continue
8663                         s = SETPREFIX + k
8664                         if s in world_set:
8665                                 continue
8666                         all_added.append(SETPREFIX + k)
8667                 all_added.extend(added_favorites)
8668                 all_added.sort()
8669                 for a in all_added:
8670                         print ">>> Recording %s in \"world\" favorites file..." % \
8671                                 colorize("INFORM", str(a))
8672                 if all_added:
8673                         world_set.update(all_added)
8674
8675                 if world_locked:
8676                         world_set.unlock()
8677
8678         def loadResumeCommand(self, resume_data, skip_masked=False):
8679                 """
8680                 Add a resume command to the graph and validate it in the process.  This
8681                 will raise a PackageNotFound exception if a package is not available.
8682                 """
8683
8684                 if not isinstance(resume_data, dict):
8685                         return False
8686
8687                 mergelist = resume_data.get("mergelist")
8688                 if not isinstance(mergelist, list):
8689                         mergelist = []
8690
8691                 fakedb = self.mydbapi
8692                 trees = self.trees
8693                 serialized_tasks = []
8694                 masked_tasks = []
8695                 for x in mergelist:
8696                         if not (isinstance(x, list) and len(x) == 4):
8697                                 continue
8698                         pkg_type, myroot, pkg_key, action = x
8699                         if pkg_type not in self.pkg_tree_map:
8700                                 continue
8701                         if action != "merge":
8702                                 continue
8703                         tree_type = self.pkg_tree_map[pkg_type]
8704                         mydb = trees[myroot][tree_type].dbapi
8705                         db_keys = list(self._trees_orig[myroot][
8706                                 tree_type].dbapi._aux_cache_keys)
8707                         try:
8708                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8709                         except KeyError:
8710                                 # It does no exist or it is corrupt.
8711                                 if action == "uninstall":
8712                                         continue
8713                                 raise portage.exception.PackageNotFound(pkg_key)
8714                         installed = action == "uninstall"
8715                         built = pkg_type != "ebuild"
8716                         root_config = self.roots[myroot]
8717                         pkg = Package(built=built, cpv=pkg_key,
8718                                 installed=installed, metadata=metadata,
8719                                 operation=action, root_config=root_config,
8720                                 type_name=pkg_type)
8721                         if pkg_type == "ebuild":
8722                                 pkgsettings = self.pkgsettings[myroot]
8723                                 pkgsettings.setcpv(pkg)
8724                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8725                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8726                         self._pkg_cache[pkg] = pkg
8727
8728                         root_config = self.roots[pkg.root]
8729                         if "merge" == pkg.operation and \
8730                                 not visible(root_config.settings, pkg):
8731                                 if skip_masked:
8732                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8733                                 else:
8734                                         self._unsatisfied_deps_for_display.append(
8735                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8736
8737                         fakedb[myroot].cpv_inject(pkg)
8738                         serialized_tasks.append(pkg)
8739                         self.spinner.update()
8740
8741                 if self._unsatisfied_deps_for_display:
8742                         return False
8743
8744                 if not serialized_tasks or "--nodeps" in self.myopts:
8745                         self._serialized_tasks_cache = serialized_tasks
8746                         self._scheduler_graph = self.digraph
8747                 else:
8748                         self._select_package = self._select_pkg_from_graph
8749                         self.myparams.add("selective")
8750                         # Always traverse deep dependencies in order to account for
8751                         # potentially unsatisfied dependencies of installed packages.
8752                         # This is necessary for correct --keep-going or --resume operation
8753                         # in case a package from a group of circularly dependent packages
8754                         # fails. In this case, a package which has recently been installed
8755                         # may have an unsatisfied circular dependency (pulled in by
8756                         # PDEPEND, for example). So, even though a package is already
8757                         # installed, it may not have all of it's dependencies satisfied, so
8758                         # it may not be usable. If such a package is in the subgraph of
8759                         # deep depenedencies of a scheduled build, that build needs to
8760                         # be cancelled. In order for this type of situation to be
8761                         # recognized, deep traversal of dependencies is required.
8762                         self.myparams.add("deep")
8763
8764                         favorites = resume_data.get("favorites")
8765                         args_set = self._sets["args"]
8766                         if isinstance(favorites, list):
8767                                 args = self._load_favorites(favorites)
8768                         else:
8769                                 args = []
8770
8771                         for task in serialized_tasks:
8772                                 if isinstance(task, Package) and \
8773                                         task.operation == "merge":
8774                                         if not self._add_pkg(task, None):
8775                                                 return False
8776
8777                         # Packages for argument atoms need to be explicitly
8778                         # added via _add_pkg() so that they are included in the
8779                         # digraph (needed at least for --tree display).
8780                         for arg in args:
8781                                 for atom in arg.set:
8782                                         pkg, existing_node = self._select_package(
8783                                                 arg.root_config.root, atom)
8784                                         if existing_node is None and \
8785                                                 pkg is not None:
8786                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8787                                                         root=pkg.root, parent=arg)):
8788                                                         return False
8789
8790                         # Allow unsatisfied deps here to avoid showing a masking
8791                         # message for an unsatisfied dep that isn't necessarily
8792                         # masked.
8793                         if not self._create_graph(allow_unsatisfied=True):
8794                                 return False
8795
8796                         unsatisfied_deps = []
8797                         for dep in self._unsatisfied_deps:
8798                                 if not isinstance(dep.parent, Package):
8799                                         continue
8800                                 if dep.parent.operation == "merge":
8801                                         unsatisfied_deps.append(dep)
8802                                         continue
8803
8804                                 # For unsatisfied deps of installed packages, only account for
8805                                 # them if they are in the subgraph of dependencies of a package
8806                                 # which is scheduled to be installed.
8807                                 unsatisfied_install = False
8808                                 traversed = set()
8809                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8810                                 while dep_stack:
8811                                         node = dep_stack.pop()
8812                                         if not isinstance(node, Package):
8813                                                 continue
8814                                         if node.operation == "merge":
8815                                                 unsatisfied_install = True
8816                                                 break
8817                                         if node in traversed:
8818                                                 continue
8819                                         traversed.add(node)
8820                                         dep_stack.extend(self.digraph.parent_nodes(node))
8821
8822                                 if unsatisfied_install:
8823                                         unsatisfied_deps.append(dep)
8824
8825                         if masked_tasks or unsatisfied_deps:
8826                                 # This probably means that a required package
8827                                 # was dropped via --skipfirst. It makes the
8828                                 # resume list invalid, so convert it to a
8829                                 # UnsatisfiedResumeDep exception.
8830                                 raise self.UnsatisfiedResumeDep(self,
8831                                         masked_tasks + unsatisfied_deps)
8832                         self._serialized_tasks_cache = None
8833                         try:
8834                                 self.altlist()
8835                         except self._unknown_internal_error:
8836                                 return False
8837
8838                 return True
8839
8840         def _load_favorites(self, favorites):
8841                 """
8842                 Use a list of favorites to resume state from a
8843                 previous select_files() call. This creates similar
8844                 DependencyArg instances to those that would have
8845                 been created by the original select_files() call.
8846                 This allows Package instances to be matched with
8847                 DependencyArg instances during graph creation.
8848                 """
8849                 root_config = self.roots[self.target_root]
8850                 getSetAtoms = root_config.setconfig.getSetAtoms
8851                 sets = root_config.sets
8852                 args = []
8853                 for x in favorites:
8854                         if not isinstance(x, basestring):
8855                                 continue
8856                         if x in ("system", "world"):
8857                                 x = SETPREFIX + x
8858                         if x.startswith(SETPREFIX):
8859                                 s = x[len(SETPREFIX):]
8860                                 if s not in sets:
8861                                         continue
8862                                 if s in self._sets:
8863                                         continue
8864                                 # Recursively expand sets so that containment tests in
8865                                 # self._get_parent_sets() properly match atoms in nested
8866                                 # sets (like if world contains system).
8867                                 expanded_set = InternalPackageSet(
8868                                         initial_atoms=getSetAtoms(s))
8869                                 self._sets[s] = expanded_set
8870                                 args.append(SetArg(arg=x, set=expanded_set,
8871                                         root_config=root_config))
8872                         else:
8873                                 if not portage.isvalidatom(x):
8874                                         continue
8875                                 args.append(AtomArg(arg=x, atom=x,
8876                                         root_config=root_config))
8877
8878                 self._set_args(args)
8879                 return args
8880
8881         class UnsatisfiedResumeDep(portage.exception.PortageException):
8882                 """
8883                 A dependency of a resume list is not installed. This
8884                 can occur when a required package is dropped from the
8885                 merge list via --skipfirst.
8886                 """
8887                 def __init__(self, depgraph, value):
8888                         portage.exception.PortageException.__init__(self, value)
8889                         self.depgraph = depgraph
8890
8891         class _internal_exception(portage.exception.PortageException):
8892                 def __init__(self, value=""):
8893                         portage.exception.PortageException.__init__(self, value)
8894
8895         class _unknown_internal_error(_internal_exception):
8896                 """
8897                 Used by the depgraph internally to terminate graph creation.
8898                 The specific reason for the failure should have been dumped
8899                 to stderr, unfortunately, the exact reason for the failure
8900                 may not be known.
8901                 """
8902
8903         class _serialize_tasks_retry(_internal_exception):
8904                 """
8905                 This is raised by the _serialize_tasks() method when it needs to
8906                 be called again for some reason. The only case that it's currently
8907                 used for is when neglected dependencies need to be added to the
8908                 graph in order to avoid making a potentially unsafe decision.
8909                 """
8910
8911         class _dep_check_composite_db(portage.dbapi):
8912                 """
8913                 A dbapi-like interface that is optimized for use in dep_check() calls.
8914                 This is built on top of the existing depgraph package selection logic.
8915                 Some packages that have been added to the graph may be masked from this
8916                 view in order to influence the atom preference selection that occurs
8917                 via dep_check().
8918                 """
8919                 def __init__(self, depgraph, root):
8920                         portage.dbapi.__init__(self)
8921                         self._depgraph = depgraph
8922                         self._root = root
8923                         self._match_cache = {}
8924                         self._cpv_pkg_map = {}
8925
8926                 def _clear_cache(self):
8927                         self._match_cache.clear()
8928                         self._cpv_pkg_map.clear()
8929
8930                 def match(self, atom):
8931                         ret = self._match_cache.get(atom)
8932                         if ret is not None:
8933                                 return ret[:]
8934                         orig_atom = atom
8935                         if "/" not in atom:
8936                                 atom = self._dep_expand(atom)
8937                         pkg, existing = self._depgraph._select_package(self._root, atom)
8938                         if not pkg:
8939                                 ret = []
8940                         else:
8941                                 # Return the highest available from select_package() as well as
8942                                 # any matching slots in the graph db.
8943                                 slots = set()
8944                                 slots.add(pkg.metadata["SLOT"])
8945                                 atom_cp = portage.dep_getkey(atom)
8946                                 if pkg.cp.startswith("virtual/"):
8947                                         # For new-style virtual lookahead that occurs inside
8948                                         # dep_check(), examine all slots. This is needed
8949                                         # so that newer slots will not unnecessarily be pulled in
8950                                         # when a satisfying lower slot is already installed. For
8951                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8952                                         # there's no need to pull in a newer slot to satisfy a
8953                                         # virtual/jdk dependency.
8954                                         for db, pkg_type, built, installed, db_keys in \
8955                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8956                                                 for cpv in db.match(atom):
8957                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8958                                                                 continue
8959                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8960                                 ret = []
8961                                 if self._visible(pkg):
8962                                         self._cpv_pkg_map[pkg.cpv] = pkg
8963                                         ret.append(pkg.cpv)
8964                                 slots.remove(pkg.metadata["SLOT"])
8965                                 while slots:
8966                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8967                                         pkg, existing = self._depgraph._select_package(
8968                                                 self._root, slot_atom)
8969                                         if not pkg:
8970                                                 continue
8971                                         if not self._visible(pkg):
8972                                                 continue
8973                                         self._cpv_pkg_map[pkg.cpv] = pkg
8974                                         ret.append(pkg.cpv)
8975                                 if ret:
8976                                         self._cpv_sort_ascending(ret)
8977                         self._match_cache[orig_atom] = ret
8978                         return ret[:]
8979
8980                 def _visible(self, pkg):
8981                         if pkg.installed and "selective" not in self._depgraph.myparams:
8982                                 try:
8983                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8984                                 except (StopIteration, portage.exception.InvalidDependString):
8985                                         arg = None
8986                                 if arg:
8987                                         return False
8988                         if pkg.installed:
8989                                 try:
8990                                         if not visible(
8991                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8992                                                 return False
8993                                 except portage.exception.InvalidDependString:
8994                                         pass
8995                         in_graph = self._depgraph._slot_pkg_map[
8996                                 self._root].get(pkg.slot_atom)
8997                         if in_graph is None:
8998                                 # Mask choices for packages which are not the highest visible
8999                                 # version within their slot (since they usually trigger slot
9000                                 # conflicts).
9001                                 highest_visible, in_graph = self._depgraph._select_package(
9002                                         self._root, pkg.slot_atom)
9003                                 if pkg != highest_visible:
9004                                         return False
9005                         elif in_graph != pkg:
9006                                 # Mask choices for packages that would trigger a slot
9007                                 # conflict with a previously selected package.
9008                                 return False
9009                         return True
9010
9011                 def _dep_expand(self, atom):
9012                         """
9013                         This is only needed for old installed packages that may
9014                         contain atoms that are not fully qualified with a specific
9015                         category. Emulate the cpv_expand() function that's used by
9016                         dbapi.match() in cases like this. If there are multiple
9017                         matches, it's often due to a new-style virtual that has
9018                         been added, so try to filter those out to avoid raising
9019                         a ValueError.
9020                         """
9021                         root_config = self._depgraph.roots[self._root]
9022                         orig_atom = atom
9023                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9024                         if len(expanded_atoms) > 1:
9025                                 non_virtual_atoms = []
9026                                 for x in expanded_atoms:
9027                                         if not portage.dep_getkey(x).startswith("virtual/"):
9028                                                 non_virtual_atoms.append(x)
9029                                 if len(non_virtual_atoms) == 1:
9030                                         expanded_atoms = non_virtual_atoms
9031                         if len(expanded_atoms) > 1:
9032                                 # compatible with portage.cpv_expand()
9033                                 raise portage.exception.AmbiguousPackageName(
9034                                         [portage.dep_getkey(x) for x in expanded_atoms])
9035                         if expanded_atoms:
9036                                 atom = expanded_atoms[0]
9037                         else:
9038                                 null_atom = insert_category_into_atom(atom, "null")
9039                                 null_cp = portage.dep_getkey(null_atom)
9040                                 cat, atom_pn = portage.catsplit(null_cp)
9041                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9042                                 if virts_p:
9043                                         # Allow the resolver to choose which virtual.
9044                                         atom = insert_category_into_atom(atom, "virtual")
9045                                 else:
9046                                         atom = insert_category_into_atom(atom, "null")
9047                         return atom
9048
9049                 def aux_get(self, cpv, wants):
9050                         metadata = self._cpv_pkg_map[cpv].metadata
9051                         return [metadata.get(x, "") for x in wants]
9052
9053 class RepoDisplay(object):
9054         def __init__(self, roots):
9055                 self._shown_repos = {}
9056                 self._unknown_repo = False
9057                 repo_paths = set()
9058                 for root_config in roots.itervalues():
9059                         portdir = root_config.settings.get("PORTDIR")
9060                         if portdir:
9061                                 repo_paths.add(portdir)
9062                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9063                         if overlays:
9064                                 repo_paths.update(overlays.split())
9065                 repo_paths = list(repo_paths)
9066                 self._repo_paths = repo_paths
9067                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9068                         for repo_path in repo_paths ]
9069
9070                 # pre-allocate index for PORTDIR so that it always has index 0.
9071                 for root_config in roots.itervalues():
9072                         portdb = root_config.trees["porttree"].dbapi
9073                         portdir = portdb.porttree_root
9074                         if portdir:
9075                                 self.repoStr(portdir)
9076
9077         def repoStr(self, repo_path_real):
9078                 real_index = -1
9079                 if repo_path_real:
9080                         real_index = self._repo_paths_real.index(repo_path_real)
9081                 if real_index == -1:
9082                         s = "?"
9083                         self._unknown_repo = True
9084                 else:
9085                         shown_repos = self._shown_repos
9086                         repo_paths = self._repo_paths
9087                         repo_path = repo_paths[real_index]
9088                         index = shown_repos.get(repo_path)
9089                         if index is None:
9090                                 index = len(shown_repos)
9091                                 shown_repos[repo_path] = index
9092                         s = str(index)
9093                 return s
9094
9095         def __str__(self):
9096                 output = []
9097                 shown_repos = self._shown_repos
9098                 unknown_repo = self._unknown_repo
9099                 if shown_repos or self._unknown_repo:
9100                         output.append("Portage tree and overlays:\n")
9101                 show_repo_paths = list(shown_repos)
9102                 for repo_path, repo_index in shown_repos.iteritems():
9103                         show_repo_paths[repo_index] = repo_path
9104                 if show_repo_paths:
9105                         for index, repo_path in enumerate(show_repo_paths):
9106                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9107                 if unknown_repo:
9108                         output.append(" "+teal("[?]") + \
9109                                 " indicates that the source repository could not be determined\n")
9110                 return "".join(output)
9111
9112 class PackageCounters(object):
9113
9114         def __init__(self):
9115                 self.upgrades   = 0
9116                 self.downgrades = 0
9117                 self.new        = 0
9118                 self.newslot    = 0
9119                 self.reinst     = 0
9120                 self.uninst     = 0
9121                 self.blocks     = 0
9122                 self.blocks_satisfied         = 0
9123                 self.totalsize  = 0
9124                 self.restrict_fetch           = 0
9125                 self.restrict_fetch_satisfied = 0
9126                 self.interactive              = 0
9127
9128         def __str__(self):
9129                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9130                 myoutput = []
9131                 details = []
9132                 myoutput.append("Total: %s package" % total_installs)
9133                 if total_installs != 1:
9134                         myoutput.append("s")
9135                 if total_installs != 0:
9136                         myoutput.append(" (")
9137                 if self.upgrades > 0:
9138                         details.append("%s upgrade" % self.upgrades)
9139                         if self.upgrades > 1:
9140                                 details[-1] += "s"
9141                 if self.downgrades > 0:
9142                         details.append("%s downgrade" % self.downgrades)
9143                         if self.downgrades > 1:
9144                                 details[-1] += "s"
9145                 if self.new > 0:
9146                         details.append("%s new" % self.new)
9147                 if self.newslot > 0:
9148                         details.append("%s in new slot" % self.newslot)
9149                         if self.newslot > 1:
9150                                 details[-1] += "s"
9151                 if self.reinst > 0:
9152                         details.append("%s reinstall" % self.reinst)
9153                         if self.reinst > 1:
9154                                 details[-1] += "s"
9155                 if self.uninst > 0:
9156                         details.append("%s uninstall" % self.uninst)
9157                         if self.uninst > 1:
9158                                 details[-1] += "s"
9159                 if self.interactive > 0:
9160                         details.append("%s %s" % (self.interactive,
9161                                 colorize("WARN", "interactive")))
9162                 myoutput.append(", ".join(details))
9163                 if total_installs != 0:
9164                         myoutput.append(")")
9165                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9166                 if self.restrict_fetch:
9167                         myoutput.append("\nFetch Restriction: %s package" % \
9168                                 self.restrict_fetch)
9169                         if self.restrict_fetch > 1:
9170                                 myoutput.append("s")
9171                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9172                         myoutput.append(bad(" (%s unsatisfied)") % \
9173                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9174                 if self.blocks > 0:
9175                         myoutput.append("\nConflict: %s block" % \
9176                                 self.blocks)
9177                         if self.blocks > 1:
9178                                 myoutput.append("s")
9179                         if self.blocks_satisfied < self.blocks:
9180                                 myoutput.append(bad(" (%s unsatisfied)") % \
9181                                         (self.blocks - self.blocks_satisfied))
9182                 return "".join(myoutput)
9183
9184 class PollSelectAdapter(PollConstants):
9185
9186         """
9187         Use select to emulate a poll object, for
9188         systems that don't support poll().
9189         """
9190
9191         def __init__(self):
9192                 self._registered = {}
9193                 self._select_args = [[], [], []]
9194
9195         def register(self, fd, *args):
9196                 """
9197                 Only POLLIN is currently supported!
9198                 """
9199                 if len(args) > 1:
9200                         raise TypeError(
9201                                 "register expected at most 2 arguments, got " + \
9202                                 repr(1 + len(args)))
9203
9204                 eventmask = PollConstants.POLLIN | \
9205                         PollConstants.POLLPRI | PollConstants.POLLOUT
9206                 if args:
9207                         eventmask = args[0]
9208
9209                 self._registered[fd] = eventmask
9210                 self._select_args = None
9211
9212         def unregister(self, fd):
9213                 self._select_args = None
9214                 del self._registered[fd]
9215
9216         def poll(self, *args):
9217                 if len(args) > 1:
9218                         raise TypeError(
9219                                 "poll expected at most 2 arguments, got " + \
9220                                 repr(1 + len(args)))
9221
9222                 timeout = None
9223                 if args:
9224                         timeout = args[0]
9225
9226                 select_args = self._select_args
9227                 if select_args is None:
9228                         select_args = [self._registered.keys(), [], []]
9229
9230                 if timeout is not None:
9231                         select_args = select_args[:]
9232                         # Translate poll() timeout args to select() timeout args:
9233                         #
9234                         #          | units        | value(s) for indefinite block
9235                         # ---------|--------------|------------------------------
9236                         #   poll   | milliseconds | omitted, negative, or None
9237                         # ---------|--------------|------------------------------
9238                         #   select | seconds      | omitted
9239                         # ---------|--------------|------------------------------
9240
9241                         if timeout is not None and timeout < 0:
9242                                 timeout = None
9243                         if timeout is not None:
9244                                 select_args.append(timeout / 1000)
9245
9246                 select_events = select.select(*select_args)
9247                 poll_events = []
9248                 for fd in select_events[0]:
9249                         poll_events.append((fd, PollConstants.POLLIN))
9250                 return poll_events
9251
9252 class SequentialTaskQueue(SlotObject):
9253
9254         __slots__ = ("max_jobs", "running_tasks") + \
9255                 ("_dirty", "_scheduling", "_task_queue")
9256
9257         def __init__(self, **kwargs):
9258                 SlotObject.__init__(self, **kwargs)
9259                 self._task_queue = deque()
9260                 self.running_tasks = set()
9261                 if self.max_jobs is None:
9262                         self.max_jobs = 1
9263                 self._dirty = True
9264
9265         def add(self, task):
9266                 self._task_queue.append(task)
9267                 self._dirty = True
9268
9269         def addFront(self, task):
9270                 self._task_queue.appendleft(task)
9271                 self._dirty = True
9272
9273         def schedule(self):
9274
9275                 if not self._dirty:
9276                         return False
9277
9278                 if not self:
9279                         return False
9280
9281                 if self._scheduling:
9282                         # Ignore any recursive schedule() calls triggered via
9283                         # self._task_exit().
9284                         return False
9285
9286                 self._scheduling = True
9287
9288                 task_queue = self._task_queue
9289                 running_tasks = self.running_tasks
9290                 max_jobs = self.max_jobs
9291                 state_changed = False
9292
9293                 while task_queue and \
9294                         (max_jobs is True or len(running_tasks) < max_jobs):
9295                         task = task_queue.popleft()
9296                         cancelled = getattr(task, "cancelled", None)
9297                         if not cancelled:
9298                                 running_tasks.add(task)
9299                                 task.addExitListener(self._task_exit)
9300                                 task.start()
9301                         state_changed = True
9302
9303                 self._dirty = False
9304                 self._scheduling = False
9305
9306                 return state_changed
9307
9308         def _task_exit(self, task):
9309                 """
9310                 Since we can always rely on exit listeners being called, the set of
9311                 running tasks is always pruned automatically and there is never any need
9312                 to actively prune it.
9313                 """
9314                 self.running_tasks.remove(task)
9315                 if self._task_queue:
9316                         self._dirty = True
9317
9318         def clear(self):
9319                 self._task_queue.clear()
9320                 running_tasks = self.running_tasks
9321                 while running_tasks:
9322                         task = running_tasks.pop()
9323                         task.removeExitListener(self._task_exit)
9324                         task.cancel()
9325                 self._dirty = False
9326
9327         def __nonzero__(self):
9328                 return bool(self._task_queue or self.running_tasks)
9329
9330         def __len__(self):
9331                 return len(self._task_queue) + len(self.running_tasks)
9332
9333 _can_poll_device = None
9334
9335 def can_poll_device():
9336         """
9337         Test if it's possible to use poll() on a device such as a pty. This
9338         is known to fail on Darwin.
9339         @rtype: bool
9340         @returns: True if poll() on a device succeeds, False otherwise.
9341         """
9342
9343         global _can_poll_device
9344         if _can_poll_device is not None:
9345                 return _can_poll_device
9346
9347         if not hasattr(select, "poll"):
9348                 _can_poll_device = False
9349                 return _can_poll_device
9350
9351         try:
9352                 dev_null = open('/dev/null', 'rb')
9353         except IOError:
9354                 _can_poll_device = False
9355                 return _can_poll_device
9356
9357         p = select.poll()
9358         p.register(dev_null.fileno(), PollConstants.POLLIN)
9359
9360         invalid_request = False
9361         for f, event in p.poll():
9362                 if event & PollConstants.POLLNVAL:
9363                         invalid_request = True
9364                         break
9365         dev_null.close()
9366
9367         _can_poll_device = not invalid_request
9368         return _can_poll_device
9369
9370 def create_poll_instance():
9371         """
9372         Create an instance of select.poll, or an instance of
9373         PollSelectAdapter there is no poll() implementation or
9374         it is broken somehow.
9375         """
9376         if can_poll_device():
9377                 return select.poll()
9378         return PollSelectAdapter()
9379
9380 getloadavg = getattr(os, "getloadavg", None)
9381 if getloadavg is None:
9382         def getloadavg():
9383                 """
9384                 Uses /proc/loadavg to emulate os.getloadavg().
9385                 Raises OSError if the load average was unobtainable.
9386                 """
9387                 try:
9388                         loadavg_str = open('/proc/loadavg').readline()
9389                 except IOError:
9390                         # getloadavg() is only supposed to raise OSError, so convert
9391                         raise OSError('unknown')
9392                 loadavg_split = loadavg_str.split()
9393                 if len(loadavg_split) < 3:
9394                         raise OSError('unknown')
9395                 loadavg_floats = []
9396                 for i in xrange(3):
9397                         try:
9398                                 loadavg_floats.append(float(loadavg_split[i]))
9399                         except ValueError:
9400                                 raise OSError('unknown')
9401                 return tuple(loadavg_floats)
9402
9403 class PollScheduler(object):
9404
9405         class _sched_iface_class(SlotObject):
9406                 __slots__ = ("register", "schedule", "unregister")
9407
9408         def __init__(self):
9409                 self._max_jobs = 1
9410                 self._max_load = None
9411                 self._jobs = 0
9412                 self._poll_event_queue = []
9413                 self._poll_event_handlers = {}
9414                 self._poll_event_handler_ids = {}
9415                 # Increment id for each new handler.
9416                 self._event_handler_id = 0
9417                 self._poll_obj = create_poll_instance()
9418                 self._scheduling = False
9419
9420         def _schedule(self):
9421                 """
9422                 Calls _schedule_tasks() and automatically returns early from
9423                 any recursive calls to this method that the _schedule_tasks()
9424                 call might trigger. This makes _schedule() safe to call from
9425                 inside exit listeners.
9426                 """
9427                 if self._scheduling:
9428                         return False
9429                 self._scheduling = True
9430                 try:
9431                         return self._schedule_tasks()
9432                 finally:
9433                         self._scheduling = False
9434
9435         def _running_job_count(self):
9436                 return self._jobs
9437
9438         def _can_add_job(self):
9439                 max_jobs = self._max_jobs
9440                 max_load = self._max_load
9441
9442                 if self._max_jobs is not True and \
9443                         self._running_job_count() >= self._max_jobs:
9444                         return False
9445
9446                 if max_load is not None and \
9447                         (max_jobs is True or max_jobs > 1) and \
9448                         self._running_job_count() >= 1:
9449                         try:
9450                                 avg1, avg5, avg15 = getloadavg()
9451                         except OSError:
9452                                 return False
9453
9454                         if avg1 >= max_load:
9455                                 return False
9456
9457                 return True
9458
9459         def _poll(self, timeout=None):
9460                 """
9461                 All poll() calls pass through here. The poll events
9462                 are added directly to self._poll_event_queue.
9463                 In order to avoid endless blocking, this raises
9464                 StopIteration if timeout is None and there are
9465                 no file descriptors to poll.
9466                 """
9467                 if not self._poll_event_handlers:
9468                         self._schedule()
9469                         if timeout is None and \
9470                                 not self._poll_event_handlers:
9471                                 raise StopIteration(
9472                                         "timeout is None and there are no poll() event handlers")
9473
9474                 # The following error is known to occur with Linux kernel versions
9475                 # less than 2.6.24:
9476                 #
9477                 #   select.error: (4, 'Interrupted system call')
9478                 #
9479                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9480                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9481                 # without any events.
9482                 while True:
9483                         try:
9484                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9485                                 break
9486                         except select.error, e:
9487                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9488                                         level=logging.ERROR, noiselevel=-1)
9489                                 del e
9490                                 if timeout is not None:
9491                                         break
9492
9493         def _next_poll_event(self, timeout=None):
9494                 """
9495                 Since the _schedule_wait() loop is called by event
9496                 handlers from _poll_loop(), maintain a central event
9497                 queue for both of them to share events from a single
9498                 poll() call. In order to avoid endless blocking, this
9499                 raises StopIteration if timeout is None and there are
9500                 no file descriptors to poll.
9501                 """
9502                 if not self._poll_event_queue:
9503                         self._poll(timeout)
9504                 return self._poll_event_queue.pop()
9505
9506         def _poll_loop(self):
9507
9508                 event_handlers = self._poll_event_handlers
9509                 event_handled = False
9510
9511                 try:
9512                         while event_handlers:
9513                                 f, event = self._next_poll_event()
9514                                 handler, reg_id = event_handlers[f]
9515                                 handler(f, event)
9516                                 event_handled = True
9517                 except StopIteration:
9518                         event_handled = True
9519
9520                 if not event_handled:
9521                         raise AssertionError("tight loop")
9522
9523         def _schedule_yield(self):
9524                 """
9525                 Schedule for a short period of time chosen by the scheduler based
9526                 on internal state. Synchronous tasks should call this periodically
9527                 in order to allow the scheduler to service pending poll events. The
9528                 scheduler will call poll() exactly once, without blocking, and any
9529                 resulting poll events will be serviced.
9530                 """
9531                 event_handlers = self._poll_event_handlers
9532                 events_handled = 0
9533
9534                 if not event_handlers:
9535                         return bool(events_handled)
9536
9537                 if not self._poll_event_queue:
9538                         self._poll(0)
9539
9540                 try:
9541                         while event_handlers and self._poll_event_queue:
9542                                 f, event = self._next_poll_event()
9543                                 handler, reg_id = event_handlers[f]
9544                                 handler(f, event)
9545                                 events_handled += 1
9546                 except StopIteration:
9547                         events_handled += 1
9548
9549                 return bool(events_handled)
9550
9551         def _register(self, f, eventmask, handler):
9552                 """
9553                 @rtype: Integer
9554                 @return: A unique registration id, for use in schedule() or
9555                         unregister() calls.
9556                 """
9557                 if f in self._poll_event_handlers:
9558                         raise AssertionError("fd %d is already registered" % f)
9559                 self._event_handler_id += 1
9560                 reg_id = self._event_handler_id
9561                 self._poll_event_handler_ids[reg_id] = f
9562                 self._poll_event_handlers[f] = (handler, reg_id)
9563                 self._poll_obj.register(f, eventmask)
9564                 return reg_id
9565
9566         def _unregister(self, reg_id):
9567                 f = self._poll_event_handler_ids[reg_id]
9568                 self._poll_obj.unregister(f)
9569                 del self._poll_event_handlers[f]
9570                 del self._poll_event_handler_ids[reg_id]
9571
9572         def _schedule_wait(self, wait_ids):
9573                 """
9574                 Schedule until wait_id is not longer registered
9575                 for poll() events.
9576                 @type wait_id: int
9577                 @param wait_id: a task id to wait for
9578                 """
9579                 event_handlers = self._poll_event_handlers
9580                 handler_ids = self._poll_event_handler_ids
9581                 event_handled = False
9582
9583                 if isinstance(wait_ids, int):
9584                         wait_ids = frozenset([wait_ids])
9585
9586                 try:
9587                         while wait_ids.intersection(handler_ids):
9588                                 f, event = self._next_poll_event()
9589                                 handler, reg_id = event_handlers[f]
9590                                 handler(f, event)
9591                                 event_handled = True
9592                 except StopIteration:
9593                         event_handled = True
9594
9595                 return event_handled
9596
9597 class QueueScheduler(PollScheduler):
9598
9599         """
9600         Add instances of SequentialTaskQueue and then call run(). The
9601         run() method returns when no tasks remain.
9602         """
9603
9604         def __init__(self, max_jobs=None, max_load=None):
9605                 PollScheduler.__init__(self)
9606
9607                 if max_jobs is None:
9608                         max_jobs = 1
9609
9610                 self._max_jobs = max_jobs
9611                 self._max_load = max_load
9612                 self.sched_iface = self._sched_iface_class(
9613                         register=self._register,
9614                         schedule=self._schedule_wait,
9615                         unregister=self._unregister)
9616
9617                 self._queues = []
9618                 self._schedule_listeners = []
9619
9620         def add(self, q):
9621                 self._queues.append(q)
9622
9623         def remove(self, q):
9624                 self._queues.remove(q)
9625
9626         def run(self):
9627
9628                 while self._schedule():
9629                         self._poll_loop()
9630
9631                 while self._running_job_count():
9632                         self._poll_loop()
9633
9634         def _schedule_tasks(self):
9635                 """
9636                 @rtype: bool
9637                 @returns: True if there may be remaining tasks to schedule,
9638                         False otherwise.
9639                 """
9640                 while self._can_add_job():
9641                         n = self._max_jobs - self._running_job_count()
9642                         if n < 1:
9643                                 break
9644
9645                         if not self._start_next_job(n):
9646                                 return False
9647
9648                 for q in self._queues:
9649                         if q:
9650                                 return True
9651                 return False
9652
9653         def _running_job_count(self):
9654                 job_count = 0
9655                 for q in self._queues:
9656                         job_count += len(q.running_tasks)
9657                 self._jobs = job_count
9658                 return job_count
9659
9660         def _start_next_job(self, n=1):
9661                 started_count = 0
9662                 for q in self._queues:
9663                         initial_job_count = len(q.running_tasks)
9664                         q.schedule()
9665                         final_job_count = len(q.running_tasks)
9666                         if final_job_count > initial_job_count:
9667                                 started_count += (final_job_count - initial_job_count)
9668                         if started_count >= n:
9669                                 break
9670                 return started_count
9671
9672 class TaskScheduler(object):
9673
9674         """
9675         A simple way to handle scheduling of AsynchrousTask instances. Simply
9676         add tasks and call run(). The run() method returns when no tasks remain.
9677         """
9678
9679         def __init__(self, max_jobs=None, max_load=None):
9680                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9681                 self._scheduler = QueueScheduler(
9682                         max_jobs=max_jobs, max_load=max_load)
9683                 self.sched_iface = self._scheduler.sched_iface
9684                 self.run = self._scheduler.run
9685                 self._scheduler.add(self._queue)
9686
9687         def add(self, task):
9688                 self._queue.add(task)
9689
9690 class JobStatusDisplay(object):
9691
9692         _bound_properties = ("curval", "failed", "running")
9693         _jobs_column_width = 48
9694
9695         # Don't update the display unless at least this much
9696         # time has passed, in units of seconds.
9697         _min_display_latency = 2
9698
9699         _default_term_codes = {
9700                 'cr'  : '\r',
9701                 'el'  : '\x1b[K',
9702                 'nel' : '\n',
9703         }
9704
9705         _termcap_name_map = {
9706                 'carriage_return' : 'cr',
9707                 'clr_eol'         : 'el',
9708                 'newline'         : 'nel',
9709         }
9710
9711         def __init__(self, out=sys.stdout, quiet=False):
9712                 object.__setattr__(self, "out", out)
9713                 object.__setattr__(self, "quiet", quiet)
9714                 object.__setattr__(self, "maxval", 0)
9715                 object.__setattr__(self, "merges", 0)
9716                 object.__setattr__(self, "_changed", False)
9717                 object.__setattr__(self, "_displayed", False)
9718                 object.__setattr__(self, "_last_display_time", 0)
9719                 object.__setattr__(self, "width", 80)
9720                 self.reset()
9721
9722                 isatty = hasattr(out, "isatty") and out.isatty()
9723                 object.__setattr__(self, "_isatty", isatty)
9724                 if not isatty or not self._init_term():
9725                         term_codes = {}
9726                         for k, capname in self._termcap_name_map.iteritems():
9727                                 term_codes[k] = self._default_term_codes[capname]
9728                         object.__setattr__(self, "_term_codes", term_codes)
9729                 encoding = sys.getdefaultencoding()
9730                 for k, v in self._term_codes.items():
9731                         if not isinstance(v, str):
9732                                 self._term_codes[k] = v.decode(encoding, 'replace')
9733
9734         def _init_term(self):
9735                 """
9736                 Initialize term control codes.
9737                 @rtype: bool
9738                 @returns: True if term codes were successfully initialized,
9739                         False otherwise.
9740                 """
9741
9742                 term_type = os.environ.get("TERM", "vt100")
9743                 tigetstr = None
9744
9745                 try:
9746                         import curses
9747                         try:
9748                                 curses.setupterm(term_type, self.out.fileno())
9749                                 tigetstr = curses.tigetstr
9750                         except curses.error:
9751                                 pass
9752                 except ImportError:
9753                         pass
9754
9755                 if tigetstr is None:
9756                         return False
9757
9758                 term_codes = {}
9759                 for k, capname in self._termcap_name_map.iteritems():
9760                         code = tigetstr(capname)
9761                         if code is None:
9762                                 code = self._default_term_codes[capname]
9763                         term_codes[k] = code
9764                 object.__setattr__(self, "_term_codes", term_codes)
9765                 return True
9766
9767         def _format_msg(self, msg):
9768                 return ">>> %s" % msg
9769
9770         def _erase(self):
9771                 self.out.write(
9772                         self._term_codes['carriage_return'] + \
9773                         self._term_codes['clr_eol'])
9774                 self.out.flush()
9775                 self._displayed = False
9776
9777         def _display(self, line):
9778                 self.out.write(line)
9779                 self.out.flush()
9780                 self._displayed = True
9781
9782         def _update(self, msg):
9783
9784                 out = self.out
9785                 if not self._isatty:
9786                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9787                         self.out.flush()
9788                         self._displayed = True
9789                         return
9790
9791                 if self._displayed:
9792                         self._erase()
9793
9794                 self._display(self._format_msg(msg))
9795
9796         def displayMessage(self, msg):
9797
9798                 was_displayed = self._displayed
9799
9800                 if self._isatty and self._displayed:
9801                         self._erase()
9802
9803                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9804                 self.out.flush()
9805                 self._displayed = False
9806
9807                 if was_displayed:
9808                         self._changed = True
9809                         self.display()
9810
9811         def reset(self):
9812                 self.maxval = 0
9813                 self.merges = 0
9814                 for name in self._bound_properties:
9815                         object.__setattr__(self, name, 0)
9816
9817                 if self._displayed:
9818                         self.out.write(self._term_codes['newline'])
9819                         self.out.flush()
9820                         self._displayed = False
9821
9822         def __setattr__(self, name, value):
9823                 old_value = getattr(self, name)
9824                 if value == old_value:
9825                         return
9826                 object.__setattr__(self, name, value)
9827                 if name in self._bound_properties:
9828                         self._property_change(name, old_value, value)
9829
9830         def _property_change(self, name, old_value, new_value):
9831                 self._changed = True
9832                 self.display()
9833
9834         def _load_avg_str(self):
9835                 try:
9836                         avg = getloadavg()
9837                 except OSError:
9838                         return 'unknown'
9839
9840                 max_avg = max(avg)
9841
9842                 if max_avg < 10:
9843                         digits = 2
9844                 elif max_avg < 100:
9845                         digits = 1
9846                 else:
9847                         digits = 0
9848
9849                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9850
9851         def display(self):
9852                 """
9853                 Display status on stdout, but only if something has
9854                 changed since the last call.
9855                 """
9856
9857                 if self.quiet:
9858                         return
9859
9860                 current_time = time.time()
9861                 time_delta = current_time - self._last_display_time
9862                 if self._displayed and \
9863                         not self._changed:
9864                         if not self._isatty:
9865                                 return
9866                         if time_delta < self._min_display_latency:
9867                                 return
9868
9869                 self._last_display_time = current_time
9870                 self._changed = False
9871                 self._display_status()
9872
9873         def _display_status(self):
9874                 # Don't use len(self._completed_tasks) here since that also
9875                 # can include uninstall tasks.
9876                 curval_str = str(self.curval)
9877                 maxval_str = str(self.maxval)
9878                 running_str = str(self.running)
9879                 failed_str = str(self.failed)
9880                 load_avg_str = self._load_avg_str()
9881
9882                 color_output = StringIO()
9883                 plain_output = StringIO()
9884                 style_file = portage.output.ConsoleStyleFile(color_output)
9885                 style_file.write_listener = plain_output
9886                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9887                 style_writer.style_listener = style_file.new_styles
9888                 f = formatter.AbstractFormatter(style_writer)
9889
9890                 number_style = "INFORM"
9891                 f.add_literal_data("Jobs: ")
9892                 f.push_style(number_style)
9893                 f.add_literal_data(curval_str)
9894                 f.pop_style()
9895                 f.add_literal_data(" of ")
9896                 f.push_style(number_style)
9897                 f.add_literal_data(maxval_str)
9898                 f.pop_style()
9899                 f.add_literal_data(" complete")
9900
9901                 if self.running:
9902                         f.add_literal_data(", ")
9903                         f.push_style(number_style)
9904                         f.add_literal_data(running_str)
9905                         f.pop_style()
9906                         f.add_literal_data(" running")
9907
9908                 if self.failed:
9909                         f.add_literal_data(", ")
9910                         f.push_style(number_style)
9911                         f.add_literal_data(failed_str)
9912                         f.pop_style()
9913                         f.add_literal_data(" failed")
9914
9915                 padding = self._jobs_column_width - len(plain_output.getvalue())
9916                 if padding > 0:
9917                         f.add_literal_data(padding * " ")
9918
9919                 f.add_literal_data("Load avg: ")
9920                 f.add_literal_data(load_avg_str)
9921
9922                 # Truncate to fit width, to avoid making the terminal scroll if the
9923                 # line overflows (happens when the load average is large).
9924                 plain_output = plain_output.getvalue()
9925                 if self._isatty and len(plain_output) > self.width:
9926                         # Use plain_output here since it's easier to truncate
9927                         # properly than the color output which contains console
9928                         # color codes.
9929                         self._update(plain_output[:self.width])
9930                 else:
9931                         self._update(color_output.getvalue())
9932
9933                 xtermTitle(" ".join(plain_output.split()))
9934
9935 class Scheduler(PollScheduler):
9936
9937         _opts_ignore_blockers = \
9938                 frozenset(["--buildpkgonly",
9939                 "--fetchonly", "--fetch-all-uri",
9940                 "--nodeps", "--pretend"])
9941
9942         _opts_no_background = \
9943                 frozenset(["--pretend",
9944                 "--fetchonly", "--fetch-all-uri"])
9945
9946         _opts_no_restart = frozenset(["--buildpkgonly",
9947                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9948
9949         _bad_resume_opts = set(["--ask", "--changelog",
9950                 "--resume", "--skipfirst"])
9951
9952         _fetch_log = "/var/log/emerge-fetch.log"
9953
9954         class _iface_class(SlotObject):
9955                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9956                         "dblinkElog", "fetch", "register", "schedule",
9957                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9958                         "unregister")
9959
9960         class _fetch_iface_class(SlotObject):
9961                 __slots__ = ("log_file", "schedule")
9962
9963         _task_queues_class = slot_dict_class(
9964                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9965
9966         class _build_opts_class(SlotObject):
9967                 __slots__ = ("buildpkg", "buildpkgonly",
9968                         "fetch_all_uri", "fetchonly", "pretend")
9969
9970         class _binpkg_opts_class(SlotObject):
9971                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9972
9973         class _pkg_count_class(SlotObject):
9974                 __slots__ = ("curval", "maxval")
9975
9976         class _emerge_log_class(SlotObject):
9977                 __slots__ = ("xterm_titles",)
9978
9979                 def log(self, *pargs, **kwargs):
9980                         if not self.xterm_titles:
9981                                 # Avoid interference with the scheduler's status display.
9982                                 kwargs.pop("short_msg", None)
9983                         emergelog(self.xterm_titles, *pargs, **kwargs)
9984
9985         class _failed_pkg(SlotObject):
9986                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9987
9988         class _ConfigPool(object):
9989                 """Interface for a task to temporarily allocate a config
9990                 instance from a pool. This allows a task to be constructed
9991                 long before the config instance actually becomes needed, like
9992                 when prefetchers are constructed for the whole merge list."""
9993                 __slots__ = ("_root", "_allocate", "_deallocate")
9994                 def __init__(self, root, allocate, deallocate):
9995                         self._root = root
9996                         self._allocate = allocate
9997                         self._deallocate = deallocate
9998                 def allocate(self):
9999                         return self._allocate(self._root)
10000                 def deallocate(self, settings):
10001                         self._deallocate(settings)
10002
10003         class _unknown_internal_error(portage.exception.PortageException):
10004                 """
10005                 Used internally to terminate scheduling. The specific reason for
10006                 the failure should have been dumped to stderr.
10007                 """
10008                 def __init__(self, value=""):
10009                         portage.exception.PortageException.__init__(self, value)
10010
10011         def __init__(self, settings, trees, mtimedb, myopts,
10012                 spinner, mergelist, favorites, digraph):
10013                 PollScheduler.__init__(self)
10014                 self.settings = settings
10015                 self.target_root = settings["ROOT"]
10016                 self.trees = trees
10017                 self.myopts = myopts
10018                 self._spinner = spinner
10019                 self._mtimedb = mtimedb
10020                 self._mergelist = mergelist
10021                 self._favorites = favorites
10022                 self._args_set = InternalPackageSet(favorites)
10023                 self._build_opts = self._build_opts_class()
10024                 for k in self._build_opts.__slots__:
10025                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10026                 self._binpkg_opts = self._binpkg_opts_class()
10027                 for k in self._binpkg_opts.__slots__:
10028                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10029
10030                 self.curval = 0
10031                 self._logger = self._emerge_log_class()
10032                 self._task_queues = self._task_queues_class()
10033                 for k in self._task_queues.allowed_keys:
10034                         setattr(self._task_queues, k,
10035                                 SequentialTaskQueue())
10036
10037                 # Holds merges that will wait to be executed when no builds are
10038                 # executing. This is useful for system packages since dependencies
10039                 # on system packages are frequently unspecified.
10040                 self._merge_wait_queue = []
10041                 # Holds merges that have been transfered from the merge_wait_queue to
10042                 # the actual merge queue. They are removed from this list upon
10043                 # completion. Other packages can start building only when this list is
10044                 # empty.
10045                 self._merge_wait_scheduled = []
10046
10047                 # Holds system packages and their deep runtime dependencies. Before
10048                 # being merged, these packages go to merge_wait_queue, to be merged
10049                 # when no other packages are building.
10050                 self._deep_system_deps = set()
10051
10052                 # Holds packages to merge which will satisfy currently unsatisfied
10053                 # deep runtime dependencies of system packages. If this is not empty
10054                 # then no parallel builds will be spawned until it is empty. This
10055                 # minimizes the possibility that a build will fail due to the system
10056                 # being in a fragile state. For example, see bug #259954.
10057                 self._unsatisfied_system_deps = set()
10058
10059                 self._status_display = JobStatusDisplay()
10060                 self._max_load = myopts.get("--load-average")
10061                 max_jobs = myopts.get("--jobs")
10062                 if max_jobs is None:
10063                         max_jobs = 1
10064                 self._set_max_jobs(max_jobs)
10065
10066                 # The root where the currently running
10067                 # portage instance is installed.
10068                 self._running_root = trees["/"]["root_config"]
10069                 self.edebug = 0
10070                 if settings.get("PORTAGE_DEBUG", "") == "1":
10071                         self.edebug = 1
10072                 self.pkgsettings = {}
10073                 self._config_pool = {}
10074                 self._blocker_db = {}
10075                 for root in trees:
10076                         self._config_pool[root] = []
10077                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10078
10079                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10080                         schedule=self._schedule_fetch)
10081                 self._sched_iface = self._iface_class(
10082                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10083                         dblinkDisplayMerge=self._dblink_display_merge,
10084                         dblinkElog=self._dblink_elog,
10085                         fetch=fetch_iface, register=self._register,
10086                         schedule=self._schedule_wait,
10087                         scheduleSetup=self._schedule_setup,
10088                         scheduleUnpack=self._schedule_unpack,
10089                         scheduleYield=self._schedule_yield,
10090                         unregister=self._unregister)
10091
10092                 self._prefetchers = weakref.WeakValueDictionary()
10093                 self._pkg_queue = []
10094                 self._completed_tasks = set()
10095
10096                 self._failed_pkgs = []
10097                 self._failed_pkgs_all = []
10098                 self._failed_pkgs_die_msgs = []
10099                 self._post_mod_echo_msgs = []
10100                 self._parallel_fetch = False
10101                 merge_count = len([x for x in mergelist \
10102                         if isinstance(x, Package) and x.operation == "merge"])
10103                 self._pkg_count = self._pkg_count_class(
10104                         curval=0, maxval=merge_count)
10105                 self._status_display.maxval = self._pkg_count.maxval
10106
10107                 # The load average takes some time to respond when new
10108                 # jobs are added, so we need to limit the rate of adding
10109                 # new jobs.
10110                 self._job_delay_max = 10
10111                 self._job_delay_factor = 1.0
10112                 self._job_delay_exp = 1.5
10113                 self._previous_job_start_time = None
10114
10115                 self._set_digraph(digraph)
10116
10117                 # This is used to memoize the _choose_pkg() result when
10118                 # no packages can be chosen until one of the existing
10119                 # jobs completes.
10120                 self._choose_pkg_return_early = False
10121
10122                 features = self.settings.features
10123                 if "parallel-fetch" in features and \
10124                         not ("--pretend" in self.myopts or \
10125                         "--fetch-all-uri" in self.myopts or \
10126                         "--fetchonly" in self.myopts):
10127                         if "distlocks" not in features:
10128                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10129                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10130                                         "requires the distlocks feature enabled"+"\n",
10131                                         noiselevel=-1)
10132                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10133                                         "thus parallel-fetching is being disabled"+"\n",
10134                                         noiselevel=-1)
10135                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10136                         elif len(mergelist) > 1:
10137                                 self._parallel_fetch = True
10138
10139                 if self._parallel_fetch:
10140                                 # clear out existing fetch log if it exists
10141                                 try:
10142                                         open(self._fetch_log, 'w')
10143                                 except EnvironmentError:
10144                                         pass
10145
10146                 self._running_portage = None
10147                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10148                         portage.const.PORTAGE_PACKAGE_ATOM)
10149                 if portage_match:
10150                         cpv = portage_match.pop()
10151                         self._running_portage = self._pkg(cpv, "installed",
10152                                 self._running_root, installed=True)
10153
10154         def _poll(self, timeout=None):
10155                 self._schedule()
10156                 PollScheduler._poll(self, timeout=timeout)
10157
10158         def _set_max_jobs(self, max_jobs):
10159                 self._max_jobs = max_jobs
10160                 self._task_queues.jobs.max_jobs = max_jobs
10161
10162         def _background_mode(self):
10163                 """
10164                 Check if background mode is enabled and adjust states as necessary.
10165
10166                 @rtype: bool
10167                 @returns: True if background mode is enabled, False otherwise.
10168                 """
10169                 background = (self._max_jobs is True or \
10170                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10171                         not bool(self._opts_no_background.intersection(self.myopts))
10172
10173                 if background:
10174                         interactive_tasks = self._get_interactive_tasks()
10175                         if interactive_tasks:
10176                                 background = False
10177                                 writemsg_level(">>> Sending package output to stdio due " + \
10178                                         "to interactive package(s):\n",
10179                                         level=logging.INFO, noiselevel=-1)
10180                                 msg = [""]
10181                                 for pkg in interactive_tasks:
10182                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10183                                         if pkg.root != "/":
10184                                                 pkg_str += " for " + pkg.root
10185                                         msg.append(pkg_str)
10186                                 msg.append("")
10187                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10188                                         level=logging.INFO, noiselevel=-1)
10189                                 if self._max_jobs is True or self._max_jobs > 1:
10190                                         self._set_max_jobs(1)
10191                                         writemsg_level(">>> Setting --jobs=1 due " + \
10192                                                 "to the above interactive package(s)\n",
10193                                                 level=logging.INFO, noiselevel=-1)
10194
10195                 self._status_display.quiet = \
10196                         not background or \
10197                         ("--quiet" in self.myopts and \
10198                         "--verbose" not in self.myopts)
10199
10200                 self._logger.xterm_titles = \
10201                         "notitles" not in self.settings.features and \
10202                         self._status_display.quiet
10203
10204                 return background
10205
10206         def _get_interactive_tasks(self):
10207                 from portage import flatten
10208                 from portage.dep import use_reduce, paren_reduce
10209                 interactive_tasks = []
10210                 for task in self._mergelist:
10211                         if not (isinstance(task, Package) and \
10212                                 task.operation == "merge"):
10213                                 continue
10214                         try:
10215                                 properties = flatten(use_reduce(paren_reduce(
10216                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10217                         except portage.exception.InvalidDependString, e:
10218                                 show_invalid_depstring_notice(task,
10219                                         task.metadata["PROPERTIES"], str(e))
10220                                 raise self._unknown_internal_error()
10221                         if "interactive" in properties:
10222                                 interactive_tasks.append(task)
10223                 return interactive_tasks
10224
10225         def _set_digraph(self, digraph):
10226                 if "--nodeps" in self.myopts or \
10227                         (self._max_jobs is not True and self._max_jobs < 2):
10228                         # save some memory
10229                         self._digraph = None
10230                         return
10231
10232                 self._digraph = digraph
10233                 self._find_system_deps()
10234                 self._prune_digraph()
10235                 self._prevent_builddir_collisions()
10236
10237         def _find_system_deps(self):
10238                 """
10239                 Find system packages and their deep runtime dependencies. Before being
10240                 merged, these packages go to merge_wait_queue, to be merged when no
10241                 other packages are building.
10242                 """
10243                 deep_system_deps = self._deep_system_deps
10244                 deep_system_deps.clear()
10245                 deep_system_deps.update(
10246                         _find_deep_system_runtime_deps(self._digraph))
10247                 deep_system_deps.difference_update([pkg for pkg in \
10248                         deep_system_deps if pkg.operation != "merge"])
10249
10250         def _prune_digraph(self):
10251                 """
10252                 Prune any root nodes that are irrelevant.
10253                 """
10254
10255                 graph = self._digraph
10256                 completed_tasks = self._completed_tasks
10257                 removed_nodes = set()
10258                 while True:
10259                         for node in graph.root_nodes():
10260                                 if not isinstance(node, Package) or \
10261                                         (node.installed and node.operation == "nomerge") or \
10262                                         node.onlydeps or \
10263                                         node in completed_tasks:
10264                                         removed_nodes.add(node)
10265                         if removed_nodes:
10266                                 graph.difference_update(removed_nodes)
10267                         if not removed_nodes:
10268                                 break
10269                         removed_nodes.clear()
10270
10271         def _prevent_builddir_collisions(self):
10272                 """
10273                 When building stages, sometimes the same exact cpv needs to be merged
10274                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10275                 in the builddir. Currently, normal file locks would be inappropriate
10276                 for this purpose since emerge holds all of it's build dir locks from
10277                 the main process.
10278                 """
10279                 cpv_map = {}
10280                 for pkg in self._mergelist:
10281                         if not isinstance(pkg, Package):
10282                                 # a satisfied blocker
10283                                 continue
10284                         if pkg.installed:
10285                                 continue
10286                         if pkg.cpv not in cpv_map:
10287                                 cpv_map[pkg.cpv] = [pkg]
10288                                 continue
10289                         for earlier_pkg in cpv_map[pkg.cpv]:
10290                                 self._digraph.add(earlier_pkg, pkg,
10291                                         priority=DepPriority(buildtime=True))
10292                         cpv_map[pkg.cpv].append(pkg)
10293
10294         class _pkg_failure(portage.exception.PortageException):
10295                 """
10296                 An instance of this class is raised by unmerge() when
10297                 an uninstallation fails.
10298                 """
10299                 status = 1
10300                 def __init__(self, *pargs):
10301                         portage.exception.PortageException.__init__(self, pargs)
10302                         if pargs:
10303                                 self.status = pargs[0]
10304
10305         def _schedule_fetch(self, fetcher):
10306                 """
10307                 Schedule a fetcher on the fetch queue, in order to
10308                 serialize access to the fetch log.
10309                 """
10310                 self._task_queues.fetch.addFront(fetcher)
10311
10312         def _schedule_setup(self, setup_phase):
10313                 """
10314                 Schedule a setup phase on the merge queue, in order to
10315                 serialize unsandboxed access to the live filesystem.
10316                 """
10317                 self._task_queues.merge.addFront(setup_phase)
10318                 self._schedule()
10319
10320         def _schedule_unpack(self, unpack_phase):
10321                 """
10322                 Schedule an unpack phase on the unpack queue, in order
10323                 to serialize $DISTDIR access for live ebuilds.
10324                 """
10325                 self._task_queues.unpack.add(unpack_phase)
10326
10327         def _find_blockers(self, new_pkg):
10328                 """
10329                 Returns a callable which should be called only when
10330                 the vdb lock has been acquired.
10331                 """
10332                 def get_blockers():
10333                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10334                 return get_blockers
10335
10336         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10337                 if self._opts_ignore_blockers.intersection(self.myopts):
10338                         return None
10339
10340                 # Call gc.collect() here to avoid heap overflow that
10341                 # triggers 'Cannot allocate memory' errors (reported
10342                 # with python-2.5).
10343                 import gc
10344                 gc.collect()
10345
10346                 blocker_db = self._blocker_db[new_pkg.root]
10347
10348                 blocker_dblinks = []
10349                 for blocking_pkg in blocker_db.findInstalledBlockers(
10350                         new_pkg, acquire_lock=acquire_lock):
10351                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10352                                 continue
10353                         if new_pkg.cpv == blocking_pkg.cpv:
10354                                 continue
10355                         blocker_dblinks.append(portage.dblink(
10356                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10357                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10358                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10359
10360                 gc.collect()
10361
10362                 return blocker_dblinks
10363
10364         def _dblink_pkg(self, pkg_dblink):
10365                 cpv = pkg_dblink.mycpv
10366                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10367                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10368                 installed = type_name == "installed"
10369                 return self._pkg(cpv, type_name, root_config, installed=installed)
10370
10371         def _append_to_log_path(self, log_path, msg):
10372                 f = open(log_path, 'a')
10373                 try:
10374                         f.write(msg)
10375                 finally:
10376                         f.close()
10377
10378         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10379
10380                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10381                 log_file = None
10382                 out = sys.stdout
10383                 background = self._background
10384
10385                 if background and log_path is not None:
10386                         log_file = open(log_path, 'a')
10387                         out = log_file
10388
10389                 try:
10390                         for msg in msgs:
10391                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10392                 finally:
10393                         if log_file is not None:
10394                                 log_file.close()
10395
10396         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10397                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10398                 background = self._background
10399
10400                 if log_path is None:
10401                         if not (background and level < logging.WARN):
10402                                 portage.util.writemsg_level(msg,
10403                                         level=level, noiselevel=noiselevel)
10404                 else:
10405                         if not background:
10406                                 portage.util.writemsg_level(msg,
10407                                         level=level, noiselevel=noiselevel)
10408                         self._append_to_log_path(log_path, msg)
10409
10410         def _dblink_ebuild_phase(self,
10411                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10412                 """
10413                 Using this callback for merge phases allows the scheduler
10414                 to run while these phases execute asynchronously, and allows
10415                 the scheduler control output handling.
10416                 """
10417
10418                 scheduler = self._sched_iface
10419                 settings = pkg_dblink.settings
10420                 pkg = self._dblink_pkg(pkg_dblink)
10421                 background = self._background
10422                 log_path = settings.get("PORTAGE_LOG_FILE")
10423
10424                 ebuild_phase = EbuildPhase(background=background,
10425                         pkg=pkg, phase=phase, scheduler=scheduler,
10426                         settings=settings, tree=pkg_dblink.treetype)
10427                 ebuild_phase.start()
10428                 ebuild_phase.wait()
10429
10430                 return ebuild_phase.returncode
10431
10432         def _check_manifests(self):
10433                 # Verify all the manifests now so that the user is notified of failure
10434                 # as soon as possible.
10435                 if "strict" not in self.settings.features or \
10436                         "--fetchonly" in self.myopts or \
10437                         "--fetch-all-uri" in self.myopts:
10438                         return os.EX_OK
10439
10440                 shown_verifying_msg = False
10441                 quiet_settings = {}
10442                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10443                         quiet_config = portage.config(clone=pkgsettings)
10444                         quiet_config["PORTAGE_QUIET"] = "1"
10445                         quiet_config.backup_changes("PORTAGE_QUIET")
10446                         quiet_settings[myroot] = quiet_config
10447                         del quiet_config
10448
10449                 for x in self._mergelist:
10450                         if not isinstance(x, Package) or \
10451                                 x.type_name != "ebuild":
10452                                 continue
10453
10454                         if not shown_verifying_msg:
10455                                 shown_verifying_msg = True
10456                                 self._status_msg("Verifying ebuild manifests")
10457
10458                         root_config = x.root_config
10459                         portdb = root_config.trees["porttree"].dbapi
10460                         quiet_config = quiet_settings[root_config.root]
10461                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10462                         if not portage.digestcheck([], quiet_config, strict=True):
10463                                 return 1
10464
10465                 return os.EX_OK
10466
10467         def _add_prefetchers(self):
10468
10469                 if not self._parallel_fetch:
10470                         return
10471
10472                 if self._parallel_fetch:
10473                         self._status_msg("Starting parallel fetch")
10474
10475                         prefetchers = self._prefetchers
10476                         getbinpkg = "--getbinpkg" in self.myopts
10477
10478                         # In order to avoid "waiting for lock" messages
10479                         # at the beginning, which annoy users, never
10480                         # spawn a prefetcher for the first package.
10481                         for pkg in self._mergelist[1:]:
10482                                 prefetcher = self._create_prefetcher(pkg)
10483                                 if prefetcher is not None:
10484                                         self._task_queues.fetch.add(prefetcher)
10485                                         prefetchers[pkg] = prefetcher
10486
10487         def _create_prefetcher(self, pkg):
10488                 """
10489                 @return: a prefetcher, or None if not applicable
10490                 """
10491                 prefetcher = None
10492
10493                 if not isinstance(pkg, Package):
10494                         pass
10495
10496                 elif pkg.type_name == "ebuild":
10497
10498                         prefetcher = EbuildFetcher(background=True,
10499                                 config_pool=self._ConfigPool(pkg.root,
10500                                 self._allocate_config, self._deallocate_config),
10501                                 fetchonly=1, logfile=self._fetch_log,
10502                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10503
10504                 elif pkg.type_name == "binary" and \
10505                         "--getbinpkg" in self.myopts and \
10506                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10507
10508                         prefetcher = BinpkgPrefetcher(background=True,
10509                                 pkg=pkg, scheduler=self._sched_iface)
10510
10511                 return prefetcher
10512
10513         def _is_restart_scheduled(self):
10514                 """
10515                 Check if the merge list contains a replacement
10516                 for the current running instance, that will result
10517                 in restart after merge.
10518                 @rtype: bool
10519                 @returns: True if a restart is scheduled, False otherwise.
10520                 """
10521                 if self._opts_no_restart.intersection(self.myopts):
10522                         return False
10523
10524                 mergelist = self._mergelist
10525
10526                 for i, pkg in enumerate(mergelist):
10527                         if self._is_restart_necessary(pkg) and \
10528                                 i != len(mergelist) - 1:
10529                                 return True
10530
10531                 return False
10532
10533         def _is_restart_necessary(self, pkg):
10534                 """
10535                 @return: True if merging the given package
10536                         requires restart, False otherwise.
10537                 """
10538
10539                 # Figure out if we need a restart.
10540                 if pkg.root == self._running_root.root and \
10541                         portage.match_from_list(
10542                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10543                         if self._running_portage:
10544                                 return pkg.cpv != self._running_portage.cpv
10545                         return True
10546                 return False
10547
10548         def _restart_if_necessary(self, pkg):
10549                 """
10550                 Use execv() to restart emerge. This happens
10551                 if portage upgrades itself and there are
10552                 remaining packages in the list.
10553                 """
10554
10555                 if self._opts_no_restart.intersection(self.myopts):
10556                         return
10557
10558                 if not self._is_restart_necessary(pkg):
10559                         return
10560
10561                 if pkg == self._mergelist[-1]:
10562                         return
10563
10564                 self._main_loop_cleanup()
10565
10566                 logger = self._logger
10567                 pkg_count = self._pkg_count
10568                 mtimedb = self._mtimedb
10569                 bad_resume_opts = self._bad_resume_opts
10570
10571                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10572                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10573
10574                 logger.log(" *** RESTARTING " + \
10575                         "emerge via exec() after change of " + \
10576                         "portage version.")
10577
10578                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10579                 mtimedb.commit()
10580                 portage.run_exitfuncs()
10581                 mynewargv = [sys.argv[0], "--resume"]
10582                 resume_opts = self.myopts.copy()
10583                 # For automatic resume, we need to prevent
10584                 # any of bad_resume_opts from leaking in
10585                 # via EMERGE_DEFAULT_OPTS.
10586                 resume_opts["--ignore-default-opts"] = True
10587                 for myopt, myarg in resume_opts.iteritems():
10588                         if myopt not in bad_resume_opts:
10589                                 if myarg is True:
10590                                         mynewargv.append(myopt)
10591                                 else:
10592                                         mynewargv.append(myopt +"="+ str(myarg))
10593                 # priority only needs to be adjusted on the first run
10594                 os.environ["PORTAGE_NICENESS"] = "0"
10595                 os.execv(mynewargv[0], mynewargv)
10596
10597         def merge(self):
10598
10599                 if "--resume" in self.myopts:
10600                         # We're resuming.
10601                         portage.writemsg_stdout(
10602                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10603                         self._logger.log(" *** Resuming merge...")
10604
10605                 self._save_resume_list()
10606
10607                 try:
10608                         self._background = self._background_mode()
10609                 except self._unknown_internal_error:
10610                         return 1
10611
10612                 for root in self.trees:
10613                         root_config = self.trees[root]["root_config"]
10614
10615                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10616                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10617                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10618                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10619                         if not tmpdir or not os.path.isdir(tmpdir):
10620                                 msg = "The directory specified in your " + \
10621                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10622                                 "does not exist. Please create this " + \
10623                                 "directory or correct your PORTAGE_TMPDIR setting."
10624                                 msg = textwrap.wrap(msg, 70)
10625                                 out = portage.output.EOutput()
10626                                 for l in msg:
10627                                         out.eerror(l)
10628                                 return 1
10629
10630                         if self._background:
10631                                 root_config.settings.unlock()
10632                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10633                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10634                                 root_config.settings.lock()
10635
10636                         self.pkgsettings[root] = portage.config(
10637                                 clone=root_config.settings)
10638
10639                 rval = self._check_manifests()
10640                 if rval != os.EX_OK:
10641                         return rval
10642
10643                 keep_going = "--keep-going" in self.myopts
10644                 fetchonly = self._build_opts.fetchonly
10645                 mtimedb = self._mtimedb
10646                 failed_pkgs = self._failed_pkgs
10647
10648                 while True:
10649                         rval = self._merge()
10650                         if rval == os.EX_OK or fetchonly or not keep_going:
10651                                 break
10652                         if "resume" not in mtimedb:
10653                                 break
10654                         mergelist = self._mtimedb["resume"].get("mergelist")
10655                         if not mergelist:
10656                                 break
10657
10658                         if not failed_pkgs:
10659                                 break
10660
10661                         for failed_pkg in failed_pkgs:
10662                                 mergelist.remove(list(failed_pkg.pkg))
10663
10664                         self._failed_pkgs_all.extend(failed_pkgs)
10665                         del failed_pkgs[:]
10666
10667                         if not mergelist:
10668                                 break
10669
10670                         if not self._calc_resume_list():
10671                                 break
10672
10673                         clear_caches(self.trees)
10674                         if not self._mergelist:
10675                                 break
10676
10677                         self._save_resume_list()
10678                         self._pkg_count.curval = 0
10679                         self._pkg_count.maxval = len([x for x in self._mergelist \
10680                                 if isinstance(x, Package) and x.operation == "merge"])
10681                         self._status_display.maxval = self._pkg_count.maxval
10682
10683                 self._logger.log(" *** Finished. Cleaning up...")
10684
10685                 if failed_pkgs:
10686                         self._failed_pkgs_all.extend(failed_pkgs)
10687                         del failed_pkgs[:]
10688
10689                 background = self._background
10690                 failure_log_shown = False
10691                 if background and len(self._failed_pkgs_all) == 1:
10692                         # If only one package failed then just show it's
10693                         # whole log for easy viewing.
10694                         failed_pkg = self._failed_pkgs_all[-1]
10695                         build_dir = failed_pkg.build_dir
10696                         log_file = None
10697
10698                         log_paths = [failed_pkg.build_log]
10699
10700                         log_path = self._locate_failure_log(failed_pkg)
10701                         if log_path is not None:
10702                                 try:
10703                                         log_file = open(log_path)
10704                                 except IOError:
10705                                         pass
10706
10707                         if log_file is not None:
10708                                 try:
10709                                         for line in log_file:
10710                                                 writemsg_level(line, noiselevel=-1)
10711                                 finally:
10712                                         log_file.close()
10713                                 failure_log_shown = True
10714
10715                 # Dump mod_echo output now since it tends to flood the terminal.
10716                 # This allows us to avoid having more important output, generated
10717                 # later, from being swept away by the mod_echo output.
10718                 mod_echo_output =  _flush_elog_mod_echo()
10719
10720                 if background and not failure_log_shown and \
10721                         self._failed_pkgs_all and \
10722                         self._failed_pkgs_die_msgs and \
10723                         not mod_echo_output:
10724
10725                         printer = portage.output.EOutput()
10726                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10727                                 root_msg = ""
10728                                 if mysettings["ROOT"] != "/":
10729                                         root_msg = " merged to %s" % mysettings["ROOT"]
10730                                 print
10731                                 printer.einfo("Error messages for package %s%s:" % \
10732                                         (colorize("INFORM", key), root_msg))
10733                                 print
10734                                 for phase in portage.const.EBUILD_PHASES:
10735                                         if phase not in logentries:
10736                                                 continue
10737                                         for msgtype, msgcontent in logentries[phase]:
10738                                                 if isinstance(msgcontent, basestring):
10739                                                         msgcontent = [msgcontent]
10740                                                 for line in msgcontent:
10741                                                         printer.eerror(line.strip("\n"))
10742
10743                 if self._post_mod_echo_msgs:
10744                         for msg in self._post_mod_echo_msgs:
10745                                 msg()
10746
10747                 if len(self._failed_pkgs_all) > 1 or \
10748                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10749                         if len(self._failed_pkgs_all) > 1:
10750                                 msg = "The following %d packages have " % \
10751                                         len(self._failed_pkgs_all) + \
10752                                         "failed to build or install:"
10753                         else:
10754                                 msg = "The following package has " + \
10755                                         "failed to build or install:"
10756                         prefix = bad(" * ")
10757                         writemsg(prefix + "\n", noiselevel=-1)
10758                         from textwrap import wrap
10759                         for line in wrap(msg, 72):
10760                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10761                         writemsg(prefix + "\n", noiselevel=-1)
10762                         for failed_pkg in self._failed_pkgs_all:
10763                                 writemsg("%s\t%s\n" % (prefix,
10764                                         colorize("INFORM", str(failed_pkg.pkg))),
10765                                         noiselevel=-1)
10766                         writemsg(prefix + "\n", noiselevel=-1)
10767
10768                 return rval
10769
10770         def _elog_listener(self, mysettings, key, logentries, fulltext):
10771                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10772                 if errors:
10773                         self._failed_pkgs_die_msgs.append(
10774                                 (mysettings, key, errors))
10775
10776         def _locate_failure_log(self, failed_pkg):
10777
10778                 build_dir = failed_pkg.build_dir
10779                 log_file = None
10780
10781                 log_paths = [failed_pkg.build_log]
10782
10783                 for log_path in log_paths:
10784                         if not log_path:
10785                                 continue
10786
10787                         try:
10788                                 log_size = os.stat(log_path).st_size
10789                         except OSError:
10790                                 continue
10791
10792                         if log_size == 0:
10793                                 continue
10794
10795                         return log_path
10796
10797                 return None
10798
10799         def _add_packages(self):
10800                 pkg_queue = self._pkg_queue
10801                 for pkg in self._mergelist:
10802                         if isinstance(pkg, Package):
10803                                 pkg_queue.append(pkg)
10804                         elif isinstance(pkg, Blocker):
10805                                 pass
10806
10807         def _system_merge_started(self, merge):
10808                 """
10809                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10810                 """
10811                 graph = self._digraph
10812                 if graph is None:
10813                         return
10814                 pkg = merge.merge.pkg
10815
10816                 # Skip this if $ROOT != / since it shouldn't matter if there
10817                 # are unsatisfied system runtime deps in this case.
10818                 if pkg.root != '/':
10819                         return
10820
10821                 completed_tasks = self._completed_tasks
10822                 unsatisfied = self._unsatisfied_system_deps
10823
10824                 def ignore_non_runtime_or_satisfied(priority):
10825                         """
10826                         Ignore non-runtime and satisfied runtime priorities.
10827                         """
10828                         if isinstance(priority, DepPriority) and \
10829                                 not priority.satisfied and \
10830                                 (priority.runtime or priority.runtime_post):
10831                                 return False
10832                         return True
10833
10834                 # When checking for unsatisfied runtime deps, only check
10835                 # direct deps since indirect deps are checked when the
10836                 # corresponding parent is merged.
10837                 for child in graph.child_nodes(pkg,
10838                         ignore_priority=ignore_non_runtime_or_satisfied):
10839                         if not isinstance(child, Package) or \
10840                                 child.operation == 'uninstall':
10841                                 continue
10842                         if child is pkg:
10843                                 continue
10844                         if child.operation == 'merge' and \
10845                                 child not in completed_tasks:
10846                                 unsatisfied.add(child)
10847
10848         def _merge_wait_exit_handler(self, task):
10849                 self._merge_wait_scheduled.remove(task)
10850                 self._merge_exit(task)
10851
10852         def _merge_exit(self, merge):
10853                 self._do_merge_exit(merge)
10854                 self._deallocate_config(merge.merge.settings)
10855                 if merge.returncode == os.EX_OK and \
10856                         not merge.merge.pkg.installed:
10857                         self._status_display.curval += 1
10858                 self._status_display.merges = len(self._task_queues.merge)
10859                 self._schedule()
10860
10861         def _do_merge_exit(self, merge):
10862                 pkg = merge.merge.pkg
10863                 if merge.returncode != os.EX_OK:
10864                         settings = merge.merge.settings
10865                         build_dir = settings.get("PORTAGE_BUILDDIR")
10866                         build_log = settings.get("PORTAGE_LOG_FILE")
10867
10868                         self._failed_pkgs.append(self._failed_pkg(
10869                                 build_dir=build_dir, build_log=build_log,
10870                                 pkg=pkg,
10871                                 returncode=merge.returncode))
10872                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10873
10874                         self._status_display.failed = len(self._failed_pkgs)
10875                         return
10876
10877                 self._task_complete(pkg)
10878                 pkg_to_replace = merge.merge.pkg_to_replace
10879                 if pkg_to_replace is not None:
10880                         # When a package is replaced, mark it's uninstall
10881                         # task complete (if any).
10882                         uninst_hash_key = \
10883                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10884                         self._task_complete(uninst_hash_key)
10885
10886                 if pkg.installed:
10887                         return
10888
10889                 self._restart_if_necessary(pkg)
10890
10891                 # Call mtimedb.commit() after each merge so that
10892                 # --resume still works after being interrupted
10893                 # by reboot, sigkill or similar.
10894                 mtimedb = self._mtimedb
10895                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10896                 if not mtimedb["resume"]["mergelist"]:
10897                         del mtimedb["resume"]
10898                 mtimedb.commit()
10899
10900         def _build_exit(self, build):
10901                 if build.returncode == os.EX_OK:
10902                         self.curval += 1
10903                         merge = PackageMerge(merge=build)
10904                         if not build.build_opts.buildpkgonly and \
10905                                 build.pkg in self._deep_system_deps:
10906                                 # Since dependencies on system packages are frequently
10907                                 # unspecified, merge them only when no builds are executing.
10908                                 self._merge_wait_queue.append(merge)
10909                                 merge.addStartListener(self._system_merge_started)
10910                         else:
10911                                 merge.addExitListener(self._merge_exit)
10912                                 self._task_queues.merge.add(merge)
10913                                 self._status_display.merges = len(self._task_queues.merge)
10914                 else:
10915                         settings = build.settings
10916                         build_dir = settings.get("PORTAGE_BUILDDIR")
10917                         build_log = settings.get("PORTAGE_LOG_FILE")
10918
10919                         self._failed_pkgs.append(self._failed_pkg(
10920                                 build_dir=build_dir, build_log=build_log,
10921                                 pkg=build.pkg,
10922                                 returncode=build.returncode))
10923                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10924
10925                         self._status_display.failed = len(self._failed_pkgs)
10926                         self._deallocate_config(build.settings)
10927                 self._jobs -= 1
10928                 self._status_display.running = self._jobs
10929                 self._schedule()
10930
10931         def _extract_exit(self, build):
10932                 self._build_exit(build)
10933
10934         def _task_complete(self, pkg):
10935                 self._completed_tasks.add(pkg)
10936                 self._unsatisfied_system_deps.discard(pkg)
10937                 self._choose_pkg_return_early = False
10938
10939         def _merge(self):
10940
10941                 self._add_prefetchers()
10942                 self._add_packages()
10943                 pkg_queue = self._pkg_queue
10944                 failed_pkgs = self._failed_pkgs
10945                 portage.locks._quiet = self._background
10946                 portage.elog._emerge_elog_listener = self._elog_listener
10947                 rval = os.EX_OK
10948
10949                 try:
10950                         self._main_loop()
10951                 finally:
10952                         self._main_loop_cleanup()
10953                         portage.locks._quiet = False
10954                         portage.elog._emerge_elog_listener = None
10955                         if failed_pkgs:
10956                                 rval = failed_pkgs[-1].returncode
10957
10958                 return rval
10959
10960         def _main_loop_cleanup(self):
10961                 del self._pkg_queue[:]
10962                 self._completed_tasks.clear()
10963                 self._deep_system_deps.clear()
10964                 self._unsatisfied_system_deps.clear()
10965                 self._choose_pkg_return_early = False
10966                 self._status_display.reset()
10967                 self._digraph = None
10968                 self._task_queues.fetch.clear()
10969
10970         def _choose_pkg(self):
10971                 """
10972                 Choose a task that has all it's dependencies satisfied.
10973                 """
10974
10975                 if self._choose_pkg_return_early:
10976                         return None
10977
10978                 if self._digraph is None:
10979                         if (self._jobs or self._task_queues.merge) and \
10980                                 not ("--nodeps" in self.myopts and \
10981                                 (self._max_jobs is True or self._max_jobs > 1)):
10982                                 self._choose_pkg_return_early = True
10983                                 return None
10984                         return self._pkg_queue.pop(0)
10985
10986                 if not (self._jobs or self._task_queues.merge):
10987                         return self._pkg_queue.pop(0)
10988
10989                 self._prune_digraph()
10990
10991                 chosen_pkg = None
10992                 later = set(self._pkg_queue)
10993                 for pkg in self._pkg_queue:
10994                         later.remove(pkg)
10995                         if not self._dependent_on_scheduled_merges(pkg, later):
10996                                 chosen_pkg = pkg
10997                                 break
10998
10999                 if chosen_pkg is not None:
11000                         self._pkg_queue.remove(chosen_pkg)
11001
11002                 if chosen_pkg is None:
11003                         # There's no point in searching for a package to
11004                         # choose until at least one of the existing jobs
11005                         # completes.
11006                         self._choose_pkg_return_early = True
11007
11008                 return chosen_pkg
11009
11010         def _dependent_on_scheduled_merges(self, pkg, later):
11011                 """
11012                 Traverse the subgraph of the given packages deep dependencies
11013                 to see if it contains any scheduled merges.
11014                 @param pkg: a package to check dependencies for
11015                 @type pkg: Package
11016                 @param later: packages for which dependence should be ignored
11017                         since they will be merged later than pkg anyway and therefore
11018                         delaying the merge of pkg will not result in a more optimal
11019                         merge order
11020                 @type later: set
11021                 @rtype: bool
11022                 @returns: True if the package is dependent, False otherwise.
11023                 """
11024
11025                 graph = self._digraph
11026                 completed_tasks = self._completed_tasks
11027
11028                 dependent = False
11029                 traversed_nodes = set([pkg])
11030                 direct_deps = graph.child_nodes(pkg)
11031                 node_stack = direct_deps
11032                 direct_deps = frozenset(direct_deps)
11033                 while node_stack:
11034                         node = node_stack.pop()
11035                         if node in traversed_nodes:
11036                                 continue
11037                         traversed_nodes.add(node)
11038                         if not ((node.installed and node.operation == "nomerge") or \
11039                                 (node.operation == "uninstall" and \
11040                                 node not in direct_deps) or \
11041                                 node in completed_tasks or \
11042                                 node in later):
11043                                 dependent = True
11044                                 break
11045                         node_stack.extend(graph.child_nodes(node))
11046
11047                 return dependent
11048
11049         def _allocate_config(self, root):
11050                 """
11051                 Allocate a unique config instance for a task in order
11052                 to prevent interference between parallel tasks.
11053                 """
11054                 if self._config_pool[root]:
11055                         temp_settings = self._config_pool[root].pop()
11056                 else:
11057                         temp_settings = portage.config(clone=self.pkgsettings[root])
11058                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11059                 # performance reasons, call it here to make sure all settings from the
11060                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11061                 temp_settings.reload()
11062                 temp_settings.reset()
11063                 return temp_settings
11064
11065         def _deallocate_config(self, settings):
11066                 self._config_pool[settings["ROOT"]].append(settings)
11067
11068         def _main_loop(self):
11069
11070                 # Only allow 1 job max if a restart is scheduled
11071                 # due to portage update.
11072                 if self._is_restart_scheduled() or \
11073                         self._opts_no_background.intersection(self.myopts):
11074                         self._set_max_jobs(1)
11075
11076                 merge_queue = self._task_queues.merge
11077
11078                 while self._schedule():
11079                         if self._poll_event_handlers:
11080                                 self._poll_loop()
11081
11082                 while True:
11083                         self._schedule()
11084                         if not (self._jobs or merge_queue):
11085                                 break
11086                         if self._poll_event_handlers:
11087                                 self._poll_loop()
11088
11089         def _keep_scheduling(self):
11090                 return bool(self._pkg_queue and \
11091                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11092
11093         def _schedule_tasks(self):
11094
11095                 # When the number of jobs drops to zero, process all waiting merges.
11096                 if not self._jobs and self._merge_wait_queue:
11097                         for task in self._merge_wait_queue:
11098                                 task.addExitListener(self._merge_wait_exit_handler)
11099                                 self._task_queues.merge.add(task)
11100                         self._status_display.merges = len(self._task_queues.merge)
11101                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11102                         del self._merge_wait_queue[:]
11103
11104                 self._schedule_tasks_imp()
11105                 self._status_display.display()
11106
11107                 state_change = 0
11108                 for q in self._task_queues.values():
11109                         if q.schedule():
11110                                 state_change += 1
11111
11112                 # Cancel prefetchers if they're the only reason
11113                 # the main poll loop is still running.
11114                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11115                         not (self._jobs or self._task_queues.merge) and \
11116                         self._task_queues.fetch:
11117                         self._task_queues.fetch.clear()
11118                         state_change += 1
11119
11120                 if state_change:
11121                         self._schedule_tasks_imp()
11122                         self._status_display.display()
11123
11124                 return self._keep_scheduling()
11125
11126         def _job_delay(self):
11127                 """
11128                 @rtype: bool
11129                 @returns: True if job scheduling should be delayed, False otherwise.
11130                 """
11131
11132                 if self._jobs and self._max_load is not None:
11133
11134                         current_time = time.time()
11135
11136                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11137                         if delay > self._job_delay_max:
11138                                 delay = self._job_delay_max
11139                         if (current_time - self._previous_job_start_time) < delay:
11140                                 return True
11141
11142                 return False
11143
11144         def _schedule_tasks_imp(self):
11145                 """
11146                 @rtype: bool
11147                 @returns: True if state changed, False otherwise.
11148                 """
11149
11150                 state_change = 0
11151
11152                 while True:
11153
11154                         if not self._keep_scheduling():
11155                                 return bool(state_change)
11156
11157                         if self._choose_pkg_return_early or \
11158                                 self._merge_wait_scheduled or \
11159                                 (self._jobs and self._unsatisfied_system_deps) or \
11160                                 not self._can_add_job() or \
11161                                 self._job_delay():
11162                                 return bool(state_change)
11163
11164                         pkg = self._choose_pkg()
11165                         if pkg is None:
11166                                 return bool(state_change)
11167
11168                         state_change += 1
11169
11170                         if not pkg.installed:
11171                                 self._pkg_count.curval += 1
11172
11173                         task = self._task(pkg)
11174
11175                         if pkg.installed:
11176                                 merge = PackageMerge(merge=task)
11177                                 merge.addExitListener(self._merge_exit)
11178                                 self._task_queues.merge.add(merge)
11179
11180                         elif pkg.built:
11181                                 self._jobs += 1
11182                                 self._previous_job_start_time = time.time()
11183                                 self._status_display.running = self._jobs
11184                                 task.addExitListener(self._extract_exit)
11185                                 self._task_queues.jobs.add(task)
11186
11187                         else:
11188                                 self._jobs += 1
11189                                 self._previous_job_start_time = time.time()
11190                                 self._status_display.running = self._jobs
11191                                 task.addExitListener(self._build_exit)
11192                                 self._task_queues.jobs.add(task)
11193
11194                 return bool(state_change)
11195
11196         def _task(self, pkg):
11197
11198                 pkg_to_replace = None
11199                 if pkg.operation != "uninstall":
11200                         vardb = pkg.root_config.trees["vartree"].dbapi
11201                         previous_cpv = vardb.match(pkg.slot_atom)
11202                         if previous_cpv:
11203                                 previous_cpv = previous_cpv.pop()
11204                                 pkg_to_replace = self._pkg(previous_cpv,
11205                                         "installed", pkg.root_config, installed=True)
11206
11207                 task = MergeListItem(args_set=self._args_set,
11208                         background=self._background, binpkg_opts=self._binpkg_opts,
11209                         build_opts=self._build_opts,
11210                         config_pool=self._ConfigPool(pkg.root,
11211                         self._allocate_config, self._deallocate_config),
11212                         emerge_opts=self.myopts,
11213                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11214                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11215                         pkg_to_replace=pkg_to_replace,
11216                         prefetcher=self._prefetchers.get(pkg),
11217                         scheduler=self._sched_iface,
11218                         settings=self._allocate_config(pkg.root),
11219                         statusMessage=self._status_msg,
11220                         world_atom=self._world_atom)
11221
11222                 return task
11223
11224         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11225                 pkg = failed_pkg.pkg
11226                 msg = "%s to %s %s" % \
11227                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11228                 if pkg.root != "/":
11229                         msg += " %s %s" % (preposition, pkg.root)
11230
11231                 log_path = self._locate_failure_log(failed_pkg)
11232                 if log_path is not None:
11233                         msg += ", Log file:"
11234                 self._status_msg(msg)
11235
11236                 if log_path is not None:
11237                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11238
11239         def _status_msg(self, msg):
11240                 """
11241                 Display a brief status message (no newlines) in the status display.
11242                 This is called by tasks to provide feedback to the user. This
11243                 delegates the resposibility of generating \r and \n control characters,
11244                 to guarantee that lines are created or erased when necessary and
11245                 appropriate.
11246
11247                 @type msg: str
11248                 @param msg: a brief status message (no newlines allowed)
11249                 """
11250                 if not self._background:
11251                         writemsg_level("\n")
11252                 self._status_display.displayMessage(msg)
11253
11254         def _save_resume_list(self):
11255                 """
11256                 Do this before verifying the ebuild Manifests since it might
11257                 be possible for the user to use --resume --skipfirst get past
11258                 a non-essential package with a broken digest.
11259                 """
11260                 mtimedb = self._mtimedb
11261                 mtimedb["resume"]["mergelist"] = [list(x) \
11262                         for x in self._mergelist \
11263                         if isinstance(x, Package) and x.operation == "merge"]
11264
11265                 mtimedb.commit()
11266
11267         def _calc_resume_list(self):
11268                 """
11269                 Use the current resume list to calculate a new one,
11270                 dropping any packages with unsatisfied deps.
11271                 @rtype: bool
11272                 @returns: True if successful, False otherwise.
11273                 """
11274                 print colorize("GOOD", "*** Resuming merge...")
11275
11276                 if self._show_list():
11277                         if "--tree" in self.myopts:
11278                                 portage.writemsg_stdout("\n" + \
11279                                         darkgreen("These are the packages that " + \
11280                                         "would be merged, in reverse order:\n\n"))
11281
11282                         else:
11283                                 portage.writemsg_stdout("\n" + \
11284                                         darkgreen("These are the packages that " + \
11285                                         "would be merged, in order:\n\n"))
11286
11287                 show_spinner = "--quiet" not in self.myopts and \
11288                         "--nodeps" not in self.myopts
11289
11290                 if show_spinner:
11291                         print "Calculating dependencies  ",
11292
11293                 myparams = create_depgraph_params(self.myopts, None)
11294                 success = False
11295                 e = None
11296                 try:
11297                         success, mydepgraph, dropped_tasks = resume_depgraph(
11298                                 self.settings, self.trees, self._mtimedb, self.myopts,
11299                                 myparams, self._spinner)
11300                 except depgraph.UnsatisfiedResumeDep, exc:
11301                         # rename variable to avoid python-3.0 error:
11302                         # SyntaxError: can not delete variable 'e' referenced in nested
11303                         #              scope
11304                         e = exc
11305                         mydepgraph = e.depgraph
11306                         dropped_tasks = set()
11307
11308                 if show_spinner:
11309                         print "\b\b... done!"
11310
11311                 if e is not None:
11312                         def unsatisfied_resume_dep_msg():
11313                                 mydepgraph.display_problems()
11314                                 out = portage.output.EOutput()
11315                                 out.eerror("One or more packages are either masked or " + \
11316                                         "have missing dependencies:")
11317                                 out.eerror("")
11318                                 indent = "  "
11319                                 show_parents = set()
11320                                 for dep in e.value:
11321                                         if dep.parent in show_parents:
11322                                                 continue
11323                                         show_parents.add(dep.parent)
11324                                         if dep.atom is None:
11325                                                 out.eerror(indent + "Masked package:")
11326                                                 out.eerror(2 * indent + str(dep.parent))
11327                                                 out.eerror("")
11328                                         else:
11329                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11330                                                 out.eerror(2 * indent + str(dep.parent))
11331                                                 out.eerror("")
11332                                 msg = "The resume list contains packages " + \
11333                                         "that are either masked or have " + \
11334                                         "unsatisfied dependencies. " + \
11335                                         "Please restart/continue " + \
11336                                         "the operation manually, or use --skipfirst " + \
11337                                         "to skip the first package in the list and " + \
11338                                         "any other packages that may be " + \
11339                                         "masked or have missing dependencies."
11340                                 for line in textwrap.wrap(msg, 72):
11341                                         out.eerror(line)
11342                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11343                         return False
11344
11345                 if success and self._show_list():
11346                         mylist = mydepgraph.altlist()
11347                         if mylist:
11348                                 if "--tree" in self.myopts:
11349                                         mylist.reverse()
11350                                 mydepgraph.display(mylist, favorites=self._favorites)
11351
11352                 if not success:
11353                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11354                         return False
11355                 mydepgraph.display_problems()
11356
11357                 mylist = mydepgraph.altlist()
11358                 mydepgraph.break_refs(mylist)
11359                 mydepgraph.break_refs(dropped_tasks)
11360                 self._mergelist = mylist
11361                 self._set_digraph(mydepgraph.schedulerGraph())
11362
11363                 msg_width = 75
11364                 for task in dropped_tasks:
11365                         if not (isinstance(task, Package) and task.operation == "merge"):
11366                                 continue
11367                         pkg = task
11368                         msg = "emerge --keep-going:" + \
11369                                 " %s" % (pkg.cpv,)
11370                         if pkg.root != "/":
11371                                 msg += " for %s" % (pkg.root,)
11372                         msg += " dropped due to unsatisfied dependency."
11373                         for line in textwrap.wrap(msg, msg_width):
11374                                 eerror(line, phase="other", key=pkg.cpv)
11375                         settings = self.pkgsettings[pkg.root]
11376                         # Ensure that log collection from $T is disabled inside
11377                         # elog_process(), since any logs that might exist are
11378                         # not valid here.
11379                         settings.pop("T", None)
11380                         portage.elog.elog_process(pkg.cpv, settings)
11381                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11382
11383                 return True
11384
11385         def _show_list(self):
11386                 myopts = self.myopts
11387                 if "--quiet" not in myopts and \
11388                         ("--ask" in myopts or "--tree" in myopts or \
11389                         "--verbose" in myopts):
11390                         return True
11391                 return False
11392
11393         def _world_atom(self, pkg):
11394                 """
11395                 Add the package to the world file, but only if
11396                 it's supposed to be added. Otherwise, do nothing.
11397                 """
11398
11399                 if set(("--buildpkgonly", "--fetchonly",
11400                         "--fetch-all-uri",
11401                         "--oneshot", "--onlydeps",
11402                         "--pretend")).intersection(self.myopts):
11403                         return
11404
11405                 if pkg.root != self.target_root:
11406                         return
11407
11408                 args_set = self._args_set
11409                 if not args_set.findAtomForPackage(pkg):
11410                         return
11411
11412                 logger = self._logger
11413                 pkg_count = self._pkg_count
11414                 root_config = pkg.root_config
11415                 world_set = root_config.sets["world"]
11416                 world_locked = False
11417                 if hasattr(world_set, "lock"):
11418                         world_set.lock()
11419                         world_locked = True
11420
11421                 try:
11422                         if hasattr(world_set, "load"):
11423                                 world_set.load() # maybe it's changed on disk
11424
11425                         atom = create_world_atom(pkg, args_set, root_config)
11426                         if atom:
11427                                 if hasattr(world_set, "add"):
11428                                         self._status_msg(('Recording %s in "world" ' + \
11429                                                 'favorites file...') % atom)
11430                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11431                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11432                                         world_set.add(atom)
11433                                 else:
11434                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11435                                                 (atom,), level=logging.WARN, noiselevel=-1)
11436                 finally:
11437                         if world_locked:
11438                                 world_set.unlock()
11439
11440         def _pkg(self, cpv, type_name, root_config, installed=False):
11441                 """
11442                 Get a package instance from the cache, or create a new
11443                 one if necessary. Raises KeyError from aux_get if it
11444                 failures for some reason (package does not exist or is
11445                 corrupt).
11446                 """
11447                 operation = "merge"
11448                 if installed:
11449                         operation = "nomerge"
11450
11451                 if self._digraph is not None:
11452                         # Reuse existing instance when available.
11453                         pkg = self._digraph.get(
11454                                 (type_name, root_config.root, cpv, operation))
11455                         if pkg is not None:
11456                                 return pkg
11457
11458                 tree_type = depgraph.pkg_tree_map[type_name]
11459                 db = root_config.trees[tree_type].dbapi
11460                 db_keys = list(self.trees[root_config.root][
11461                         tree_type].dbapi._aux_cache_keys)
11462                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11463                 pkg = Package(cpv=cpv, metadata=metadata,
11464                         root_config=root_config, installed=installed)
11465                 if type_name == "ebuild":
11466                         settings = self.pkgsettings[root_config.root]
11467                         settings.setcpv(pkg)
11468                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11469                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11470
11471                 return pkg
11472
11473 class MetadataRegen(PollScheduler):
11474
11475         def __init__(self, portdb, max_jobs=None, max_load=None):
11476                 PollScheduler.__init__(self)
11477                 self._portdb = portdb
11478
11479                 if max_jobs is None:
11480                         max_jobs = 1
11481
11482                 self._max_jobs = max_jobs
11483                 self._max_load = max_load
11484                 self._sched_iface = self._sched_iface_class(
11485                         register=self._register,
11486                         schedule=self._schedule_wait,
11487                         unregister=self._unregister)
11488
11489                 self._valid_pkgs = set()
11490                 self._process_iter = self._iter_metadata_processes()
11491                 self.returncode = os.EX_OK
11492                 self._error_count = 0
11493
11494         def _iter_metadata_processes(self):
11495                 portdb = self._portdb
11496                 valid_pkgs = self._valid_pkgs
11497                 every_cp = portdb.cp_all()
11498                 every_cp.sort(reverse=True)
11499
11500                 while every_cp:
11501                         cp = every_cp.pop()
11502                         portage.writemsg_stdout("Processing %s\n" % cp)
11503                         cpv_list = portdb.cp_list(cp)
11504                         for cpv in cpv_list:
11505                                 valid_pkgs.add(cpv)
11506                                 ebuild_path, repo_path = portdb.findname2(cpv)
11507                                 metadata_process = portdb._metadata_process(
11508                                         cpv, ebuild_path, repo_path)
11509                                 if metadata_process is None:
11510                                         continue
11511                                 yield metadata_process
11512
11513         def run(self):
11514
11515                 portdb = self._portdb
11516                 from portage.cache.cache_errors import CacheError
11517                 dead_nodes = {}
11518
11519                 for mytree in portdb.porttrees:
11520                         try:
11521                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11522                         except CacheError, e:
11523                                 portage.writemsg("Error listing cache entries for " + \
11524                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11525                                 del e
11526                                 dead_nodes = None
11527                                 break
11528
11529                 while self._schedule():
11530                         self._poll_loop()
11531
11532                 while self._jobs:
11533                         self._poll_loop()
11534
11535                 if dead_nodes:
11536                         for y in self._valid_pkgs:
11537                                 for mytree in portdb.porttrees:
11538                                         if portdb.findname2(y, mytree=mytree)[0]:
11539                                                 dead_nodes[mytree].discard(y)
11540
11541                         for mytree, nodes in dead_nodes.iteritems():
11542                                 auxdb = portdb.auxdb[mytree]
11543                                 for y in nodes:
11544                                         try:
11545                                                 del auxdb[y]
11546                                         except (KeyError, CacheError):
11547                                                 pass
11548
11549         def _schedule_tasks(self):
11550                 """
11551                 @rtype: bool
11552                 @returns: True if there may be remaining tasks to schedule,
11553                         False otherwise.
11554                 """
11555                 while self._can_add_job():
11556                         try:
11557                                 metadata_process = self._process_iter.next()
11558                         except StopIteration:
11559                                 return False
11560
11561                         self._jobs += 1
11562                         metadata_process.scheduler = self._sched_iface
11563                         metadata_process.addExitListener(self._metadata_exit)
11564                         metadata_process.start()
11565                 return True
11566
11567         def _metadata_exit(self, metadata_process):
11568                 self._jobs -= 1
11569                 if metadata_process.returncode != os.EX_OK:
11570                         self.returncode = 1
11571                         self._error_count += 1
11572                         self._valid_pkgs.discard(metadata_process.cpv)
11573                         portage.writemsg("Error processing %s, continuing...\n" % \
11574                                 (metadata_process.cpv,))
11575                 self._schedule()
11576
11577 class UninstallFailure(portage.exception.PortageException):
11578         """
11579         An instance of this class is raised by unmerge() when
11580         an uninstallation fails.
11581         """
11582         status = 1
11583         def __init__(self, *pargs):
11584                 portage.exception.PortageException.__init__(self, pargs)
11585                 if pargs:
11586                         self.status = pargs[0]
11587
11588 def unmerge(root_config, myopts, unmerge_action,
11589         unmerge_files, ldpath_mtimes, autoclean=0,
11590         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11591         scheduler=None, writemsg_level=portage.util.writemsg_level):
11592
11593         quiet = "--quiet" in myopts
11594         settings = root_config.settings
11595         sets = root_config.sets
11596         vartree = root_config.trees["vartree"]
11597         candidate_catpkgs=[]
11598         global_unmerge=0
11599         xterm_titles = "notitles" not in settings.features
11600         out = portage.output.EOutput()
11601         pkg_cache = {}
11602         db_keys = list(vartree.dbapi._aux_cache_keys)
11603
11604         def _pkg(cpv):
11605                 pkg = pkg_cache.get(cpv)
11606                 if pkg is None:
11607                         pkg = Package(cpv=cpv, installed=True,
11608                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11609                                 root_config=root_config,
11610                                 type_name="installed")
11611                         pkg_cache[cpv] = pkg
11612                 return pkg
11613
11614         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11615         try:
11616                 # At least the parent needs to exist for the lock file.
11617                 portage.util.ensure_dirs(vdb_path)
11618         except portage.exception.PortageException:
11619                 pass
11620         vdb_lock = None
11621         try:
11622                 if os.access(vdb_path, os.W_OK):
11623                         vdb_lock = portage.locks.lockdir(vdb_path)
11624                 realsyslist = sets["system"].getAtoms()
11625                 syslist = []
11626                 for x in realsyslist:
11627                         mycp = portage.dep_getkey(x)
11628                         if mycp in settings.getvirtuals():
11629                                 providers = []
11630                                 for provider in settings.getvirtuals()[mycp]:
11631                                         if vartree.dbapi.match(provider):
11632                                                 providers.append(provider)
11633                                 if len(providers) == 1:
11634                                         syslist.extend(providers)
11635                         else:
11636                                 syslist.append(mycp)
11637         
11638                 mysettings = portage.config(clone=settings)
11639         
11640                 if not unmerge_files:
11641                         if unmerge_action == "unmerge":
11642                                 print
11643                                 print bold("emerge unmerge") + " can only be used with specific package names"
11644                                 print
11645                                 return 0
11646                         else:
11647                                 global_unmerge = 1
11648         
11649                 localtree = vartree
11650                 # process all arguments and add all
11651                 # valid db entries to candidate_catpkgs
11652                 if global_unmerge:
11653                         if not unmerge_files:
11654                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11655                 else:
11656                         #we've got command-line arguments
11657                         if not unmerge_files:
11658                                 print "\nNo packages to unmerge have been provided.\n"
11659                                 return 0
11660                         for x in unmerge_files:
11661                                 arg_parts = x.split('/')
11662                                 if x[0] not in [".","/"] and \
11663                                         arg_parts[-1][-7:] != ".ebuild":
11664                                         #possible cat/pkg or dep; treat as such
11665                                         candidate_catpkgs.append(x)
11666                                 elif unmerge_action in ["prune","clean"]:
11667                                         print "\n!!! Prune and clean do not accept individual" + \
11668                                                 " ebuilds as arguments;\n    skipping.\n"
11669                                         continue
11670                                 else:
11671                                         # it appears that the user is specifying an installed
11672                                         # ebuild and we're in "unmerge" mode, so it's ok.
11673                                         if not os.path.exists(x):
11674                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11675                                                 return 0
11676         
11677                                         absx   = os.path.abspath(x)
11678                                         sp_absx = absx.split("/")
11679                                         if sp_absx[-1][-7:] == ".ebuild":
11680                                                 del sp_absx[-1]
11681                                                 absx = "/".join(sp_absx)
11682         
11683                                         sp_absx_len = len(sp_absx)
11684         
11685                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11686                                         vdb_len  = len(vdb_path)
11687         
11688                                         sp_vdb     = vdb_path.split("/")
11689                                         sp_vdb_len = len(sp_vdb)
11690         
11691                                         if not os.path.exists(absx+"/CONTENTS"):
11692                                                 print "!!! Not a valid db dir: "+str(absx)
11693                                                 return 0
11694         
11695                                         if sp_absx_len <= sp_vdb_len:
11696                                                 # The Path is shorter... so it can't be inside the vdb.
11697                                                 print sp_absx
11698                                                 print absx
11699                                                 print "\n!!!",x,"cannot be inside "+ \
11700                                                         vdb_path+"; aborting.\n"
11701                                                 return 0
11702         
11703                                         for idx in range(0,sp_vdb_len):
11704                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11705                                                         print sp_absx
11706                                                         print absx
11707                                                         print "\n!!!", x, "is not inside "+\
11708                                                                 vdb_path+"; aborting.\n"
11709                                                         return 0
11710         
11711                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11712                                         candidate_catpkgs.append(
11713                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11714         
11715                 newline=""
11716                 if (not "--quiet" in myopts):
11717                         newline="\n"
11718                 if settings["ROOT"] != "/":
11719                         writemsg_level(darkgreen(newline+ \
11720                                 ">>> Using system located in ROOT tree %s\n" % \
11721                                 settings["ROOT"]))
11722
11723                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11724                         not ("--quiet" in myopts):
11725                         writemsg_level(darkgreen(newline+\
11726                                 ">>> These are the packages that would be unmerged:\n"))
11727
11728                 # Preservation of order is required for --depclean and --prune so
11729                 # that dependencies are respected. Use all_selected to eliminate
11730                 # duplicate packages since the same package may be selected by
11731                 # multiple atoms.
11732                 pkgmap = []
11733                 all_selected = set()
11734                 for x in candidate_catpkgs:
11735                         # cycle through all our candidate deps and determine
11736                         # what will and will not get unmerged
11737                         try:
11738                                 mymatch = vartree.dbapi.match(x)
11739                         except portage.exception.AmbiguousPackageName, errpkgs:
11740                                 print "\n\n!!! The short ebuild name \"" + \
11741                                         x + "\" is ambiguous.  Please specify"
11742                                 print "!!! one of the following fully-qualified " + \
11743                                         "ebuild names instead:\n"
11744                                 for i in errpkgs[0]:
11745                                         print "    " + green(i)
11746                                 print
11747                                 sys.exit(1)
11748         
11749                         if not mymatch and x[0] not in "<>=~":
11750                                 mymatch = localtree.dep_match(x)
11751                         if not mymatch:
11752                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11753                                         (x, unmerge_action), noiselevel=-1)
11754                                 continue
11755
11756                         pkgmap.append(
11757                                 {"protected": set(), "selected": set(), "omitted": set()})
11758                         mykey = len(pkgmap) - 1
11759                         if unmerge_action=="unmerge":
11760                                         for y in mymatch:
11761                                                 if y not in all_selected:
11762                                                         pkgmap[mykey]["selected"].add(y)
11763                                                         all_selected.add(y)
11764                         elif unmerge_action == "prune":
11765                                 if len(mymatch) == 1:
11766                                         continue
11767                                 best_version = mymatch[0]
11768                                 best_slot = vartree.getslot(best_version)
11769                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11770                                 for mypkg in mymatch[1:]:
11771                                         myslot = vartree.getslot(mypkg)
11772                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11773                                         if (myslot == best_slot and mycounter > best_counter) or \
11774                                                 mypkg == portage.best([mypkg, best_version]):
11775                                                 if myslot == best_slot:
11776                                                         if mycounter < best_counter:
11777                                                                 # On slot collision, keep the one with the
11778                                                                 # highest counter since it is the most
11779                                                                 # recently installed.
11780                                                                 continue
11781                                                 best_version = mypkg
11782                                                 best_slot = myslot
11783                                                 best_counter = mycounter
11784                                 pkgmap[mykey]["protected"].add(best_version)
11785                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11786                                         if mypkg != best_version and mypkg not in all_selected)
11787                                 all_selected.update(pkgmap[mykey]["selected"])
11788                         else:
11789                                 # unmerge_action == "clean"
11790                                 slotmap={}
11791                                 for mypkg in mymatch:
11792                                         if unmerge_action == "clean":
11793                                                 myslot = localtree.getslot(mypkg)
11794                                         else:
11795                                                 # since we're pruning, we don't care about slots
11796                                                 # and put all the pkgs in together
11797                                                 myslot = 0
11798                                         if myslot not in slotmap:
11799                                                 slotmap[myslot] = {}
11800                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11801
11802                                 for mypkg in vartree.dbapi.cp_list(
11803                                         portage.dep_getkey(mymatch[0])):
11804                                         myslot = vartree.getslot(mypkg)
11805                                         if myslot not in slotmap:
11806                                                 slotmap[myslot] = {}
11807                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11808
11809                                 for myslot in slotmap:
11810                                         counterkeys = slotmap[myslot].keys()
11811                                         if not counterkeys:
11812                                                 continue
11813                                         counterkeys.sort()
11814                                         pkgmap[mykey]["protected"].add(
11815                                                 slotmap[myslot][counterkeys[-1]])
11816                                         del counterkeys[-1]
11817
11818                                         for counter in counterkeys[:]:
11819                                                 mypkg = slotmap[myslot][counter]
11820                                                 if mypkg not in mymatch:
11821                                                         counterkeys.remove(counter)
11822                                                         pkgmap[mykey]["protected"].add(
11823                                                                 slotmap[myslot][counter])
11824
11825                                         #be pretty and get them in order of merge:
11826                                         for ckey in counterkeys:
11827                                                 mypkg = slotmap[myslot][ckey]
11828                                                 if mypkg not in all_selected:
11829                                                         pkgmap[mykey]["selected"].add(mypkg)
11830                                                         all_selected.add(mypkg)
11831                                         # ok, now the last-merged package
11832                                         # is protected, and the rest are selected
11833                 numselected = len(all_selected)
11834                 if global_unmerge and not numselected:
11835                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11836                         return 0
11837         
11838                 if not numselected:
11839                         portage.writemsg_stdout(
11840                                 "\n>>> No packages selected for removal by " + \
11841                                 unmerge_action + "\n")
11842                         return 0
11843         finally:
11844                 if vdb_lock:
11845                         vartree.dbapi.flush_cache()
11846                         portage.locks.unlockdir(vdb_lock)
11847         
11848         from portage.sets.base import EditablePackageSet
11849         
11850         # generate a list of package sets that are directly or indirectly listed in "world",
11851         # as there is no persistent list of "installed" sets
11852         installed_sets = ["world"]
11853         stop = False
11854         pos = 0
11855         while not stop:
11856                 stop = True
11857                 pos = len(installed_sets)
11858                 for s in installed_sets[pos - 1:]:
11859                         if s not in sets:
11860                                 continue
11861                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11862                         if candidates:
11863                                 stop = False
11864                                 installed_sets += candidates
11865         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11866         del stop, pos
11867
11868         # we don't want to unmerge packages that are still listed in user-editable package sets
11869         # listed in "world" as they would be remerged on the next update of "world" or the 
11870         # relevant package sets.
11871         unknown_sets = set()
11872         for cp in xrange(len(pkgmap)):
11873                 for cpv in pkgmap[cp]["selected"].copy():
11874                         try:
11875                                 pkg = _pkg(cpv)
11876                         except KeyError:
11877                                 # It could have been uninstalled
11878                                 # by a concurrent process.
11879                                 continue
11880
11881                         if unmerge_action != "clean" and \
11882                                 root_config.root == "/" and \
11883                                 portage.match_from_list(
11884                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11885                                 msg = ("Not unmerging package %s since there is no valid " + \
11886                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11887                                 for line in textwrap.wrap(msg, 75):
11888                                         out.eerror(line)
11889                                 # adjust pkgmap so the display output is correct
11890                                 pkgmap[cp]["selected"].remove(cpv)
11891                                 all_selected.remove(cpv)
11892                                 pkgmap[cp]["protected"].add(cpv)
11893                                 continue
11894
11895                         parents = []
11896                         for s in installed_sets:
11897                                 # skip sets that the user requested to unmerge, and skip world 
11898                                 # unless we're unmerging a package set (as the package would be 
11899                                 # removed from "world" later on)
11900                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11901                                         continue
11902
11903                                 if s not in sets:
11904                                         if s in unknown_sets:
11905                                                 continue
11906                                         unknown_sets.add(s)
11907                                         out = portage.output.EOutput()
11908                                         out.eerror(("Unknown set '@%s' in " + \
11909                                                 "%svar/lib/portage/world_sets") % \
11910                                                 (s, root_config.root))
11911                                         continue
11912
11913                                 # only check instances of EditablePackageSet as other classes are generally used for
11914                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11915                                 # user can't do much about them anyway)
11916                                 if isinstance(sets[s], EditablePackageSet):
11917
11918                                         # This is derived from a snippet of code in the
11919                                         # depgraph._iter_atoms_for_pkg() method.
11920                                         for atom in sets[s].iterAtomsForPackage(pkg):
11921                                                 inst_matches = vartree.dbapi.match(atom)
11922                                                 inst_matches.reverse() # descending order
11923                                                 higher_slot = None
11924                                                 for inst_cpv in inst_matches:
11925                                                         try:
11926                                                                 inst_pkg = _pkg(inst_cpv)
11927                                                         except KeyError:
11928                                                                 # It could have been uninstalled
11929                                                                 # by a concurrent process.
11930                                                                 continue
11931
11932                                                         if inst_pkg.cp != atom.cp:
11933                                                                 continue
11934                                                         if pkg >= inst_pkg:
11935                                                                 # This is descending order, and we're not
11936                                                                 # interested in any versions <= pkg given.
11937                                                                 break
11938                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11939                                                                 higher_slot = inst_pkg
11940                                                                 break
11941                                                 if higher_slot is None:
11942                                                         parents.append(s)
11943                                                         break
11944                         if parents:
11945                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11946                                 #print colorize("WARN", "but still listed in the following package sets:")
11947                                 #print "    %s\n" % ", ".join(parents)
11948                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11949                                 print colorize("WARN", "still referenced by the following package sets:")
11950                                 print "    %s\n" % ", ".join(parents)
11951                                 # adjust pkgmap so the display output is correct
11952                                 pkgmap[cp]["selected"].remove(cpv)
11953                                 all_selected.remove(cpv)
11954                                 pkgmap[cp]["protected"].add(cpv)
11955         
11956         del installed_sets
11957
11958         numselected = len(all_selected)
11959         if not numselected:
11960                 writemsg_level(
11961                         "\n>>> No packages selected for removal by " + \
11962                         unmerge_action + "\n")
11963                 return 0
11964
11965         # Unmerge order only matters in some cases
11966         if not ordered:
11967                 unordered = {}
11968                 for d in pkgmap:
11969                         selected = d["selected"]
11970                         if not selected:
11971                                 continue
11972                         cp = portage.cpv_getkey(iter(selected).next())
11973                         cp_dict = unordered.get(cp)
11974                         if cp_dict is None:
11975                                 cp_dict = {}
11976                                 unordered[cp] = cp_dict
11977                                 for k in d:
11978                                         cp_dict[k] = set()
11979                         for k, v in d.iteritems():
11980                                 cp_dict[k].update(v)
11981                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11982
11983         for x in xrange(len(pkgmap)):
11984                 selected = pkgmap[x]["selected"]
11985                 if not selected:
11986                         continue
11987                 for mytype, mylist in pkgmap[x].iteritems():
11988                         if mytype == "selected":
11989                                 continue
11990                         mylist.difference_update(all_selected)
11991                 cp = portage.cpv_getkey(iter(selected).next())
11992                 for y in localtree.dep_match(cp):
11993                         if y not in pkgmap[x]["omitted"] and \
11994                                 y not in pkgmap[x]["selected"] and \
11995                                 y not in pkgmap[x]["protected"] and \
11996                                 y not in all_selected:
11997                                 pkgmap[x]["omitted"].add(y)
11998                 if global_unmerge and not pkgmap[x]["selected"]:
11999                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12000                         continue
12001                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12002                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12003                                 "'%s' is part of your system profile.\n" % cp),
12004                                 level=logging.WARNING, noiselevel=-1)
12005                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12006                                 "be damaging to your system.\n\n"),
12007                                 level=logging.WARNING, noiselevel=-1)
12008                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12009                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12010                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12011                 if not quiet:
12012                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12013                 else:
12014                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12015                 for mytype in ["selected","protected","omitted"]:
12016                         if not quiet:
12017                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12018                         if pkgmap[x][mytype]:
12019                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12020                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12021                                 for pn, ver, rev in sorted_pkgs:
12022                                         if rev == "r0":
12023                                                 myversion = ver
12024                                         else:
12025                                                 myversion = ver + "-" + rev
12026                                         if mytype == "selected":
12027                                                 writemsg_level(
12028                                                         colorize("UNMERGE_WARN", myversion + " "),
12029                                                         noiselevel=-1)
12030                                         else:
12031                                                 writemsg_level(
12032                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12033                         else:
12034                                 writemsg_level("none ", noiselevel=-1)
12035                         if not quiet:
12036                                 writemsg_level("\n", noiselevel=-1)
12037                 if quiet:
12038                         writemsg_level("\n", noiselevel=-1)
12039
12040         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12041                 " packages are slated for removal.\n")
12042         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12043                         " and " + colorize("GOOD", "'omitted'") + \
12044                         " packages will not be removed.\n\n")
12045
12046         if "--pretend" in myopts:
12047                 #we're done... return
12048                 return 0
12049         if "--ask" in myopts:
12050                 if userquery("Would you like to unmerge these packages?")=="No":
12051                         # enter pretend mode for correct formatting of results
12052                         myopts["--pretend"] = True
12053                         print
12054                         print "Quitting."
12055                         print
12056                         return 0
12057         #the real unmerging begins, after a short delay....
12058         if clean_delay and not autoclean:
12059                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12060
12061         for x in xrange(len(pkgmap)):
12062                 for y in pkgmap[x]["selected"]:
12063                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12064                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12065                         mysplit = y.split("/")
12066                         #unmerge...
12067                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12068                                 mysettings, unmerge_action not in ["clean","prune"],
12069                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12070                                 scheduler=scheduler)
12071
12072                         if retval != os.EX_OK:
12073                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12074                                 if raise_on_error:
12075                                         raise UninstallFailure(retval)
12076                                 sys.exit(retval)
12077                         else:
12078                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12079                                         sets["world"].cleanPackage(vartree.dbapi, y)
12080                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12081         if clean_world and hasattr(sets["world"], "remove"):
12082                 for s in root_config.setconfig.active:
12083                         sets["world"].remove(SETPREFIX+s)
12084         return 1
12085
12086 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12087
12088         if os.path.exists("/usr/bin/install-info"):
12089                 out = portage.output.EOutput()
12090                 regen_infodirs=[]
12091                 for z in infodirs:
12092                         if z=='':
12093                                 continue
12094                         inforoot=normpath(root+z)
12095                         if os.path.isdir(inforoot):
12096                                 infomtime = long(os.stat(inforoot).st_mtime)
12097                                 if inforoot not in prev_mtimes or \
12098                                         prev_mtimes[inforoot] != infomtime:
12099                                                 regen_infodirs.append(inforoot)
12100
12101                 if not regen_infodirs:
12102                         portage.writemsg_stdout("\n")
12103                         out.einfo("GNU info directory index is up-to-date.")
12104                 else:
12105                         portage.writemsg_stdout("\n")
12106                         out.einfo("Regenerating GNU info directory index...")
12107
12108                         dir_extensions = ("", ".gz", ".bz2")
12109                         icount=0
12110                         badcount=0
12111                         errmsg = ""
12112                         for inforoot in regen_infodirs:
12113                                 if inforoot=='':
12114                                         continue
12115
12116                                 if not os.path.isdir(inforoot) or \
12117                                         not os.access(inforoot, os.W_OK):
12118                                         continue
12119
12120                                 file_list = os.listdir(inforoot)
12121                                 file_list.sort()
12122                                 dir_file = os.path.join(inforoot, "dir")
12123                                 moved_old_dir = False
12124                                 processed_count = 0
12125                                 for x in file_list:
12126                                         if x.startswith(".") or \
12127                                                 os.path.isdir(os.path.join(inforoot, x)):
12128                                                 continue
12129                                         if x.startswith("dir"):
12130                                                 skip = False
12131                                                 for ext in dir_extensions:
12132                                                         if x == "dir" + ext or \
12133                                                                 x == "dir" + ext + ".old":
12134                                                                 skip = True
12135                                                                 break
12136                                                 if skip:
12137                                                         continue
12138                                         if processed_count == 0:
12139                                                 for ext in dir_extensions:
12140                                                         try:
12141                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12142                                                                 moved_old_dir = True
12143                                                         except EnvironmentError, e:
12144                                                                 if e.errno != errno.ENOENT:
12145                                                                         raise
12146                                                                 del e
12147                                         processed_count += 1
12148                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12149                                         existsstr="already exists, for file `"
12150                                         if myso!="":
12151                                                 if re.search(existsstr,myso):
12152                                                         # Already exists... Don't increment the count for this.
12153                                                         pass
12154                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12155                                                         # This info file doesn't contain a DIR-header: install-info produces this
12156                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12157                                                         # Don't increment the count for this.
12158                                                         pass
12159                                                 else:
12160                                                         badcount=badcount+1
12161                                                         errmsg += myso + "\n"
12162                                         icount=icount+1
12163
12164                                 if moved_old_dir and not os.path.exists(dir_file):
12165                                         # We didn't generate a new dir file, so put the old file
12166                                         # back where it was originally found.
12167                                         for ext in dir_extensions:
12168                                                 try:
12169                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12170                                                 except EnvironmentError, e:
12171                                                         if e.errno != errno.ENOENT:
12172                                                                 raise
12173                                                         del e
12174
12175                                 # Clean dir.old cruft so that they don't prevent
12176                                 # unmerge of otherwise empty directories.
12177                                 for ext in dir_extensions:
12178                                         try:
12179                                                 os.unlink(dir_file + ext + ".old")
12180                                         except EnvironmentError, e:
12181                                                 if e.errno != errno.ENOENT:
12182                                                         raise
12183                                                 del e
12184
12185                                 #update mtime so we can potentially avoid regenerating.
12186                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12187
12188                         if badcount:
12189                                 out.eerror("Processed %d info files; %d errors." % \
12190                                         (icount, badcount))
12191                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12192                         else:
12193                                 if icount > 0:
12194                                         out.einfo("Processed %d info files." % (icount,))
12195
12196
12197 def display_news_notification(root_config, myopts):
12198         target_root = root_config.root
12199         trees = root_config.trees
12200         settings = trees["vartree"].settings
12201         portdb = trees["porttree"].dbapi
12202         vardb = trees["vartree"].dbapi
12203         NEWS_PATH = os.path.join("metadata", "news")
12204         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12205         newsReaderDisplay = False
12206         update = "--pretend" not in myopts
12207
12208         for repo in portdb.getRepositories():
12209                 unreadItems = checkUpdatedNewsItems(
12210                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12211                 if unreadItems:
12212                         if not newsReaderDisplay:
12213                                 newsReaderDisplay = True
12214                                 print
12215                         print colorize("WARN", " * IMPORTANT:"),
12216                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12217                         
12218         
12219         if newsReaderDisplay:
12220                 print colorize("WARN", " *"),
12221                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12222                 print
12223
12224 def display_preserved_libs(vardbapi):
12225         MAX_DISPLAY = 3
12226
12227         # Ensure the registry is consistent with existing files.
12228         vardbapi.plib_registry.pruneNonExisting()
12229
12230         if vardbapi.plib_registry.hasEntries():
12231                 print
12232                 print colorize("WARN", "!!!") + " existing preserved libs:"
12233                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12234                 linkmap = vardbapi.linkmap
12235                 consumer_map = {}
12236                 owners = {}
12237                 linkmap_broken = False
12238
12239                 try:
12240                         linkmap.rebuild()
12241                 except portage.exception.CommandNotFound, e:
12242                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12243                                 level=logging.ERROR, noiselevel=-1)
12244                         del e
12245                         linkmap_broken = True
12246                 else:
12247                         search_for_owners = set()
12248                         for cpv in plibdata:
12249                                 internal_plib_keys = set(linkmap._obj_key(f) \
12250                                         for f in plibdata[cpv])
12251                                 for f in plibdata[cpv]:
12252                                         if f in consumer_map:
12253                                                 continue
12254                                         consumers = []
12255                                         for c in linkmap.findConsumers(f):
12256                                                 # Filter out any consumers that are also preserved libs
12257                                                 # belonging to the same package as the provider.
12258                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12259                                                         consumers.append(c)
12260                                         consumers.sort()
12261                                         consumer_map[f] = consumers
12262                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12263
12264                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12265
12266                 for cpv in plibdata:
12267                         print colorize("WARN", ">>>") + " package: %s" % cpv
12268                         samefile_map = {}
12269                         for f in plibdata[cpv]:
12270                                 obj_key = linkmap._obj_key(f)
12271                                 alt_paths = samefile_map.get(obj_key)
12272                                 if alt_paths is None:
12273                                         alt_paths = set()
12274                                         samefile_map[obj_key] = alt_paths
12275                                 alt_paths.add(f)
12276
12277                         for alt_paths in samefile_map.itervalues():
12278                                 alt_paths = sorted(alt_paths)
12279                                 for p in alt_paths:
12280                                         print colorize("WARN", " * ") + " - %s" % (p,)
12281                                 f = alt_paths[0]
12282                                 consumers = consumer_map.get(f, [])
12283                                 for c in consumers[:MAX_DISPLAY]:
12284                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12285                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12286                                 if len(consumers) == MAX_DISPLAY + 1:
12287                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12288                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12289                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12290                                 elif len(consumers) > MAX_DISPLAY:
12291                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12292                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12293
12294
12295 def _flush_elog_mod_echo():
12296         """
12297         Dump the mod_echo output now so that our other
12298         notifications are shown last.
12299         @rtype: bool
12300         @returns: True if messages were shown, False otherwise.
12301         """
12302         messages_shown = False
12303         try:
12304                 from portage.elog import mod_echo
12305         except ImportError:
12306                 pass # happens during downgrade to a version without the module
12307         else:
12308                 messages_shown = bool(mod_echo._items)
12309                 mod_echo.finalize()
12310         return messages_shown
12311
12312 def post_emerge(root_config, myopts, mtimedb, retval):
12313         """
12314         Misc. things to run at the end of a merge session.
12315         
12316         Update Info Files
12317         Update Config Files
12318         Update News Items
12319         Commit mtimeDB
12320         Display preserved libs warnings
12321         Exit Emerge
12322
12323         @param trees: A dictionary mapping each ROOT to it's package databases
12324         @type trees: dict
12325         @param mtimedb: The mtimeDB to store data needed across merge invocations
12326         @type mtimedb: MtimeDB class instance
12327         @param retval: Emerge's return value
12328         @type retval: Int
12329         @rype: None
12330         @returns:
12331         1.  Calls sys.exit(retval)
12332         """
12333
12334         target_root = root_config.root
12335         trees = { target_root : root_config.trees }
12336         vardbapi = trees[target_root]["vartree"].dbapi
12337         settings = vardbapi.settings
12338         info_mtimes = mtimedb["info"]
12339
12340         # Load the most current variables from ${ROOT}/etc/profile.env
12341         settings.unlock()
12342         settings.reload()
12343         settings.regenerate()
12344         settings.lock()
12345
12346         config_protect = settings.get("CONFIG_PROTECT","").split()
12347         infodirs = settings.get("INFOPATH","").split(":") + \
12348                 settings.get("INFODIR","").split(":")
12349
12350         os.chdir("/")
12351
12352         if retval == os.EX_OK:
12353                 exit_msg = " *** exiting successfully."
12354         else:
12355                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12356         emergelog("notitles" not in settings.features, exit_msg)
12357
12358         _flush_elog_mod_echo()
12359
12360         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12361         if "--pretend" in myopts or (counter_hash is not None and \
12362                 counter_hash == vardbapi._counter_hash()):
12363                 display_news_notification(root_config, myopts)
12364                 # If vdb state has not changed then there's nothing else to do.
12365                 sys.exit(retval)
12366
12367         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12368         portage.util.ensure_dirs(vdb_path)
12369         vdb_lock = None
12370         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12371                 vdb_lock = portage.locks.lockdir(vdb_path)
12372
12373         if vdb_lock:
12374                 try:
12375                         if "noinfo" not in settings.features:
12376                                 chk_updated_info_files(target_root,
12377                                         infodirs, info_mtimes, retval)
12378                         mtimedb.commit()
12379                 finally:
12380                         if vdb_lock:
12381                                 portage.locks.unlockdir(vdb_lock)
12382
12383         chk_updated_cfg_files(target_root, config_protect)
12384         
12385         display_news_notification(root_config, myopts)
12386         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12387                 display_preserved_libs(vardbapi)        
12388
12389         sys.exit(retval)
12390
12391
12392 def chk_updated_cfg_files(target_root, config_protect):
12393         if config_protect:
12394                 #number of directories with some protect files in them
12395                 procount=0
12396                 for x in config_protect:
12397                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12398                         if not os.access(x, os.W_OK):
12399                                 # Avoid Permission denied errors generated
12400                                 # later by `find`.
12401                                 continue
12402                         try:
12403                                 mymode = os.lstat(x).st_mode
12404                         except OSError:
12405                                 continue
12406                         if stat.S_ISLNK(mymode):
12407                                 # We want to treat it like a directory if it
12408                                 # is a symlink to an existing directory.
12409                                 try:
12410                                         real_mode = os.stat(x).st_mode
12411                                         if stat.S_ISDIR(real_mode):
12412                                                 mymode = real_mode
12413                                 except OSError:
12414                                         pass
12415                         if stat.S_ISDIR(mymode):
12416                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12417                         else:
12418                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12419                                         os.path.split(x.rstrip(os.path.sep))
12420                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12421                         a = commands.getstatusoutput(mycommand)
12422                         if a[0] != 0:
12423                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12424                                 sys.stderr.flush()
12425                                 # Show the error message alone, sending stdout to /dev/null.
12426                                 os.system(mycommand + " 1>/dev/null")
12427                         else:
12428                                 files = a[1].split('\0')
12429                                 # split always produces an empty string as the last element
12430                                 if files and not files[-1]:
12431                                         del files[-1]
12432                                 if files:
12433                                         procount += 1
12434                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12435                                         if stat.S_ISDIR(mymode):
12436                                                  print "%d config files in '%s' need updating." % \
12437                                                         (len(files), x)
12438                                         else:
12439                                                  print "config file '%s' needs updating." % x
12440
12441                 if procount:
12442                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12443                                 " section of the " + bold("emerge")
12444                         print " "+yellow("*")+" man page to learn how to update config files."
12445
12446 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12447         update=False):
12448         """
12449         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12450         Returns the number of unread (yet relevent) items.
12451         
12452         @param portdb: a portage tree database
12453         @type portdb: pordbapi
12454         @param vardb: an installed package database
12455         @type vardb: vardbapi
12456         @param NEWS_PATH:
12457         @type NEWS_PATH:
12458         @param UNREAD_PATH:
12459         @type UNREAD_PATH:
12460         @param repo_id:
12461         @type repo_id:
12462         @rtype: Integer
12463         @returns:
12464         1.  The number of unread but relevant news items.
12465         
12466         """
12467         from portage.news import NewsManager
12468         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12469         return manager.getUnreadItems( repo_id, update=update )
12470
12471 def insert_category_into_atom(atom, category):
12472         alphanum = re.search(r'\w', atom)
12473         if alphanum:
12474                 ret = atom[:alphanum.start()] + "%s/" % category + \
12475                         atom[alphanum.start():]
12476         else:
12477                 ret = None
12478         return ret
12479
12480 def is_valid_package_atom(x):
12481         if "/" not in x:
12482                 alphanum = re.search(r'\w', x)
12483                 if alphanum:
12484                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12485         return portage.isvalidatom(x)
12486
12487 def show_blocker_docs_link():
12488         print
12489         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12490         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12491         print
12492         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12493         print
12494
12495 def show_mask_docs():
12496         print "For more information, see the MASKED PACKAGES section in the emerge"
12497         print "man page or refer to the Gentoo Handbook."
12498
12499 def action_sync(settings, trees, mtimedb, myopts, myaction):
12500         xterm_titles = "notitles" not in settings.features
12501         emergelog(xterm_titles, " === sync")
12502         myportdir = settings.get("PORTDIR", None)
12503         out = portage.output.EOutput()
12504         if not myportdir:
12505                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12506                 sys.exit(1)
12507         if myportdir[-1]=="/":
12508                 myportdir=myportdir[:-1]
12509         try:
12510                 st = os.stat(myportdir)
12511         except OSError:
12512                 st = None
12513         if st is None:
12514                 print ">>>",myportdir,"not found, creating it."
12515                 os.makedirs(myportdir,0755)
12516                 st = os.stat(myportdir)
12517
12518         spawn_kwargs = {}
12519         spawn_kwargs["env"] = settings.environ()
12520         if 'usersync' in settings.features and \
12521                 portage.data.secpass >= 2 and \
12522                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12523                 st.st_gid != os.getgid() and st.st_mode & 0070):
12524                 try:
12525                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12526                 except KeyError:
12527                         pass
12528                 else:
12529                         # Drop privileges when syncing, in order to match
12530                         # existing uid/gid settings.
12531                         spawn_kwargs["uid"]    = st.st_uid
12532                         spawn_kwargs["gid"]    = st.st_gid
12533                         spawn_kwargs["groups"] = [st.st_gid]
12534                         spawn_kwargs["env"]["HOME"] = homedir
12535                         umask = 0002
12536                         if not st.st_mode & 0020:
12537                                 umask = umask | 0020
12538                         spawn_kwargs["umask"] = umask
12539
12540         syncuri = settings.get("SYNC", "").strip()
12541         if not syncuri:
12542                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12543                         noiselevel=-1, level=logging.ERROR)
12544                 return 1
12545
12546         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12547         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12548
12549         os.umask(0022)
12550         dosyncuri = syncuri
12551         updatecache_flg = False
12552         if myaction == "metadata":
12553                 print "skipping sync"
12554                 updatecache_flg = True
12555         elif ".git" in vcs_dirs:
12556                 # Update existing git repository, and ignore the syncuri. We are
12557                 # going to trust the user and assume that the user is in the branch
12558                 # that he/she wants updated. We'll let the user manage branches with
12559                 # git directly.
12560                 if portage.process.find_binary("git") is None:
12561                         msg = ["Command not found: git",
12562                         "Type \"emerge dev-util/git\" to enable git support."]
12563                         for l in msg:
12564                                 writemsg_level("!!! %s\n" % l,
12565                                         level=logging.ERROR, noiselevel=-1)
12566                         return 1
12567                 msg = ">>> Starting git pull in %s..." % myportdir
12568                 emergelog(xterm_titles, msg )
12569                 writemsg_level(msg + "\n")
12570                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12571                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12572                 if exitcode != os.EX_OK:
12573                         msg = "!!! git pull error in %s." % myportdir
12574                         emergelog(xterm_titles, msg)
12575                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12576                         return exitcode
12577                 msg = ">>> Git pull in %s successful" % myportdir
12578                 emergelog(xterm_titles, msg)
12579                 writemsg_level(msg + "\n")
12580                 exitcode = git_sync_timestamps(settings, myportdir)
12581                 if exitcode == os.EX_OK:
12582                         updatecache_flg = True
12583         elif syncuri[:8]=="rsync://":
12584                 for vcs_dir in vcs_dirs:
12585                         writemsg_level(("!!! %s appears to be under revision " + \
12586                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12587                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12588                         return 1
12589                 if not os.path.exists("/usr/bin/rsync"):
12590                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12591                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12592                         sys.exit(1)
12593                 mytimeout=180
12594
12595                 rsync_opts = []
12596                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12597                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12598                         rsync_opts.extend([
12599                                 "--recursive",    # Recurse directories
12600                                 "--links",        # Consider symlinks
12601                                 "--safe-links",   # Ignore links outside of tree
12602                                 "--perms",        # Preserve permissions
12603                                 "--times",        # Preserive mod times
12604                                 "--compress",     # Compress the data transmitted
12605                                 "--force",        # Force deletion on non-empty dirs
12606                                 "--whole-file",   # Don't do block transfers, only entire files
12607                                 "--delete",       # Delete files that aren't in the master tree
12608                                 "--stats",        # Show final statistics about what was transfered
12609                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12610                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12611                                 "--exclude=/local",       # Exclude local     from consideration
12612                                 "--exclude=/packages",    # Exclude packages  from consideration
12613                         ])
12614
12615                 else:
12616                         # The below validation is not needed when using the above hardcoded
12617                         # defaults.
12618
12619                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12620                         rsync_opts.extend(
12621                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12622                         for opt in ("--recursive", "--times"):
12623                                 if opt not in rsync_opts:
12624                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12625                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12626                                         rsync_opts.append(opt)
12627         
12628                         for exclude in ("distfiles", "local", "packages"):
12629                                 opt = "--exclude=/%s" % exclude
12630                                 if opt not in rsync_opts:
12631                                         portage.writemsg(yellow("WARNING:") + \
12632                                         " adding required option %s not included in "  % opt + \
12633                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12634                                         rsync_opts.append(opt)
12635         
12636                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12637                                 def rsync_opt_startswith(opt_prefix):
12638                                         for x in rsync_opts:
12639                                                 if x.startswith(opt_prefix):
12640                                                         return True
12641                                         return False
12642
12643                                 if not rsync_opt_startswith("--timeout="):
12644                                         rsync_opts.append("--timeout=%d" % mytimeout)
12645
12646                                 for opt in ("--compress", "--whole-file"):
12647                                         if opt not in rsync_opts:
12648                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12649                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12650                                                 rsync_opts.append(opt)
12651
12652                 if "--quiet" in myopts:
12653                         rsync_opts.append("--quiet")    # Shut up a lot
12654                 else:
12655                         rsync_opts.append("--verbose")  # Print filelist
12656
12657                 if "--verbose" in myopts:
12658                         rsync_opts.append("--progress")  # Progress meter for each file
12659
12660                 if "--debug" in myopts:
12661                         rsync_opts.append("--checksum") # Force checksum on all files
12662
12663                 # Real local timestamp file.
12664                 servertimestampfile = os.path.join(
12665                         myportdir, "metadata", "timestamp.chk")
12666
12667                 content = portage.util.grabfile(servertimestampfile)
12668                 mytimestamp = 0
12669                 if content:
12670                         try:
12671                                 mytimestamp = time.mktime(time.strptime(content[0],
12672                                         "%a, %d %b %Y %H:%M:%S +0000"))
12673                         except (OverflowError, ValueError):
12674                                 pass
12675                 del content
12676
12677                 try:
12678                         rsync_initial_timeout = \
12679                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12680                 except ValueError:
12681                         rsync_initial_timeout = 15
12682
12683                 try:
12684                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12685                 except SystemExit, e:
12686                         raise # Needed else can't exit
12687                 except:
12688                         maxretries=3 #default number of retries
12689
12690                 retries=0
12691                 user_name, hostname, port = re.split(
12692                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12693                 if port is None:
12694                         port=""
12695                 if user_name is None:
12696                         user_name=""
12697                 updatecache_flg=True
12698                 all_rsync_opts = set(rsync_opts)
12699                 extra_rsync_opts = shlex.split(
12700                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12701                 all_rsync_opts.update(extra_rsync_opts)
12702                 family = socket.AF_INET
12703                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12704                         family = socket.AF_INET
12705                 elif socket.has_ipv6 and \
12706                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12707                         family = socket.AF_INET6
12708                 ips=[]
12709                 SERVER_OUT_OF_DATE = -1
12710                 EXCEEDED_MAX_RETRIES = -2
12711                 while (1):
12712                         if ips:
12713                                 del ips[0]
12714                         if ips==[]:
12715                                 try:
12716                                         for addrinfo in socket.getaddrinfo(
12717                                                 hostname, None, family, socket.SOCK_STREAM):
12718                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12719                                                         # IPv6 addresses need to be enclosed in square brackets
12720                                                         ips.append("[%s]" % addrinfo[4][0])
12721                                                 else:
12722                                                         ips.append(addrinfo[4][0])
12723                                         from random import shuffle
12724                                         shuffle(ips)
12725                                 except SystemExit, e:
12726                                         raise # Needed else can't exit
12727                                 except Exception, e:
12728                                         print "Notice:",str(e)
12729                                         dosyncuri=syncuri
12730
12731                         if ips:
12732                                 try:
12733                                         dosyncuri = syncuri.replace(
12734                                                 "//" + user_name + hostname + port + "/",
12735                                                 "//" + user_name + ips[0] + port + "/", 1)
12736                                 except SystemExit, e:
12737                                         raise # Needed else can't exit
12738                                 except Exception, e:
12739                                         print "Notice:",str(e)
12740                                         dosyncuri=syncuri
12741
12742                         if (retries==0):
12743                                 if "--ask" in myopts:
12744                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12745                                                 print
12746                                                 print "Quitting."
12747                                                 print
12748                                                 sys.exit(0)
12749                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12750                                 if "--quiet" not in myopts:
12751                                         print ">>> Starting rsync with "+dosyncuri+"..."
12752                         else:
12753                                 emergelog(xterm_titles,
12754                                         ">>> Starting retry %d of %d with %s" % \
12755                                                 (retries,maxretries,dosyncuri))
12756                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12757
12758                         if mytimestamp != 0 and "--quiet" not in myopts:
12759                                 print ">>> Checking server timestamp ..."
12760
12761                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12762
12763                         if "--debug" in myopts:
12764                                 print rsynccommand
12765
12766                         exitcode = os.EX_OK
12767                         servertimestamp = 0
12768                         # Even if there's no timestamp available locally, fetch the
12769                         # timestamp anyway as an initial probe to verify that the server is
12770                         # responsive.  This protects us from hanging indefinitely on a
12771                         # connection attempt to an unresponsive server which rsync's
12772                         # --timeout option does not prevent.
12773                         if True:
12774                                 # Temporary file for remote server timestamp comparison.
12775                                 from tempfile import mkstemp
12776                                 fd, tmpservertimestampfile = mkstemp()
12777                                 os.close(fd)
12778                                 mycommand = rsynccommand[:]
12779                                 mycommand.append(dosyncuri.rstrip("/") + \
12780                                         "/metadata/timestamp.chk")
12781                                 mycommand.append(tmpservertimestampfile)
12782                                 content = None
12783                                 mypids = []
12784                                 try:
12785                                         def timeout_handler(signum, frame):
12786                                                 raise portage.exception.PortageException("timed out")
12787                                         signal.signal(signal.SIGALRM, timeout_handler)
12788                                         # Timeout here in case the server is unresponsive.  The
12789                                         # --timeout rsync option doesn't apply to the initial
12790                                         # connection attempt.
12791                                         if rsync_initial_timeout:
12792                                                 signal.alarm(rsync_initial_timeout)
12793                                         try:
12794                                                 mypids.extend(portage.process.spawn(
12795                                                         mycommand, env=settings.environ(), returnpid=True))
12796                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12797                                                 content = portage.grabfile(tmpservertimestampfile)
12798                                         finally:
12799                                                 if rsync_initial_timeout:
12800                                                         signal.alarm(0)
12801                                                 try:
12802                                                         os.unlink(tmpservertimestampfile)
12803                                                 except OSError:
12804                                                         pass
12805                                 except portage.exception.PortageException, e:
12806                                         # timed out
12807                                         print e
12808                                         del e
12809                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12810                                                 os.kill(mypids[0], signal.SIGTERM)
12811                                                 os.waitpid(mypids[0], 0)
12812                                         # This is the same code rsync uses for timeout.
12813                                         exitcode = 30
12814                                 else:
12815                                         if exitcode != os.EX_OK:
12816                                                 if exitcode & 0xff:
12817                                                         exitcode = (exitcode & 0xff) << 8
12818                                                 else:
12819                                                         exitcode = exitcode >> 8
12820                                 if mypids:
12821                                         portage.process.spawned_pids.remove(mypids[0])
12822                                 if content:
12823                                         try:
12824                                                 servertimestamp = time.mktime(time.strptime(
12825                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12826                                         except (OverflowError, ValueError):
12827                                                 pass
12828                                 del mycommand, mypids, content
12829                         if exitcode == os.EX_OK:
12830                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12831                                         emergelog(xterm_titles,
12832                                                 ">>> Cancelling sync -- Already current.")
12833                                         print
12834                                         print ">>>"
12835                                         print ">>> Timestamps on the server and in the local repository are the same."
12836                                         print ">>> Cancelling all further sync action. You are already up to date."
12837                                         print ">>>"
12838                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12839                                         print ">>>"
12840                                         print
12841                                         sys.exit(0)
12842                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12843                                         emergelog(xterm_titles,
12844                                                 ">>> Server out of date: %s" % dosyncuri)
12845                                         print
12846                                         print ">>>"
12847                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12848                                         print ">>>"
12849                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12850                                         print ">>>"
12851                                         print
12852                                         exitcode = SERVER_OUT_OF_DATE
12853                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12854                                         # actual sync
12855                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12856                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12857                                         if exitcode in [0,1,3,4,11,14,20,21]:
12858                                                 break
12859                         elif exitcode in [1,3,4,11,14,20,21]:
12860                                 break
12861                         else:
12862                                 # Code 2 indicates protocol incompatibility, which is expected
12863                                 # for servers with protocol < 29 that don't support
12864                                 # --prune-empty-directories.  Retry for a server that supports
12865                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12866                                 pass
12867
12868                         retries=retries+1
12869
12870                         if retries<=maxretries:
12871                                 print ">>> Retrying..."
12872                                 time.sleep(11)
12873                         else:
12874                                 # over retries
12875                                 # exit loop
12876                                 updatecache_flg=False
12877                                 exitcode = EXCEEDED_MAX_RETRIES
12878                                 break
12879
12880                 if (exitcode==0):
12881                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12882                 elif exitcode == SERVER_OUT_OF_DATE:
12883                         sys.exit(1)
12884                 elif exitcode == EXCEEDED_MAX_RETRIES:
12885                         sys.stderr.write(
12886                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12887                         sys.exit(1)
12888                 elif (exitcode>0):
12889                         msg = []
12890                         if exitcode==1:
12891                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12892                                 msg.append("that your SYNC statement is proper.")
12893                                 msg.append("SYNC=" + settings["SYNC"])
12894                         elif exitcode==11:
12895                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12896                                 msg.append("this means your disk is full, but can be caused by corruption")
12897                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12898                                 msg.append("and try again after the problem has been fixed.")
12899                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12900                         elif exitcode==20:
12901                                 msg.append("Rsync was killed before it finished.")
12902                         else:
12903                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12904                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12905                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12906                                 msg.append("temporary problem unless complications exist with your network")
12907                                 msg.append("(and possibly your system's filesystem) configuration.")
12908                         for line in msg:
12909                                 out.eerror(line)
12910                         sys.exit(exitcode)
12911         elif syncuri[:6]=="cvs://":
12912                 if not os.path.exists("/usr/bin/cvs"):
12913                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12914                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12915                         sys.exit(1)
12916                 cvsroot=syncuri[6:]
12917                 cvsdir=os.path.dirname(myportdir)
12918                 if not os.path.exists(myportdir+"/CVS"):
12919                         #initial checkout
12920                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12921                         if os.path.exists(cvsdir+"/gentoo-x86"):
12922                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12923                                 sys.exit(1)
12924                         try:
12925                                 os.rmdir(myportdir)
12926                         except OSError, e:
12927                                 if e.errno != errno.ENOENT:
12928                                         sys.stderr.write(
12929                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12930                                         sys.exit(1)
12931                                 del e
12932                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12933                                 print "!!! cvs checkout error; exiting."
12934                                 sys.exit(1)
12935                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12936                 else:
12937                         #cvs update
12938                         print ">>> Starting cvs update with "+syncuri+"..."
12939                         retval = portage.process.spawn_bash(
12940                                 "cd %s; cvs -z0 -q update -dP" % \
12941                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12942                         if retval != os.EX_OK:
12943                                 sys.exit(retval)
12944                 dosyncuri = syncuri
12945         else:
12946                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12947                         noiselevel=-1, level=logging.ERROR)
12948                 return 1
12949
12950         if updatecache_flg and  \
12951                 myaction != "metadata" and \
12952                 "metadata-transfer" not in settings.features:
12953                 updatecache_flg = False
12954
12955         # Reload the whole config from scratch.
12956         settings, trees, mtimedb = load_emerge_config(trees=trees)
12957         root_config = trees[settings["ROOT"]]["root_config"]
12958         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12959
12960         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12961                 action_metadata(settings, portdb, myopts)
12962
12963         if portage._global_updates(trees, mtimedb["updates"]):
12964                 mtimedb.commit()
12965                 # Reload the whole config from scratch.
12966                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12967                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12968                 root_config = trees[settings["ROOT"]]["root_config"]
12969
12970         mybestpv = portdb.xmatch("bestmatch-visible",
12971                 portage.const.PORTAGE_PACKAGE_ATOM)
12972         mypvs = portage.best(
12973                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12974                 portage.const.PORTAGE_PACKAGE_ATOM))
12975
12976         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12977
12978         if myaction != "metadata":
12979                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12980                         retval = portage.process.spawn(
12981                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12982                                 dosyncuri], env=settings.environ())
12983                         if retval != os.EX_OK:
12984                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12985
12986         if(mybestpv != mypvs) and not "--quiet" in myopts:
12987                 print
12988                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12989                 print red(" * ")+"that you update portage now, before any other packages are updated."
12990                 print
12991                 print red(" * ")+"To update portage, run 'emerge portage' now."
12992                 print
12993         
12994         display_news_notification(root_config, myopts)
12995         return os.EX_OK
12996
12997 def git_sync_timestamps(settings, portdir):
12998         """
12999         Since git doesn't preserve timestamps, synchronize timestamps between
13000         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13001         for a given file as long as the file in the working tree is not modified
13002         (relative to HEAD).
13003         """
13004         cache_dir = os.path.join(portdir, "metadata", "cache")
13005         if not os.path.isdir(cache_dir):
13006                 return os.EX_OK
13007         writemsg_level(">>> Synchronizing timestamps...\n")
13008
13009         from portage.cache.cache_errors import CacheError
13010         try:
13011                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13012                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13013         except CacheError, e:
13014                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13015                         level=logging.ERROR, noiselevel=-1)
13016                 return 1
13017
13018         ec_dir = os.path.join(portdir, "eclass")
13019         try:
13020                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13021                         if f.endswith(".eclass"))
13022         except OSError, e:
13023                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13024                         level=logging.ERROR, noiselevel=-1)
13025                 return 1
13026
13027         args = [portage.const.BASH_BINARY, "-c",
13028                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13029                 portage._shell_quote(portdir)]
13030         import subprocess
13031         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13032         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13033         rval = proc.wait()
13034         if rval != os.EX_OK:
13035                 return rval
13036
13037         modified_eclasses = set(ec for ec in ec_names \
13038                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13039
13040         updated_ec_mtimes = {}
13041
13042         for cpv in cache_db:
13043                 cpv_split = portage.catpkgsplit(cpv)
13044                 if cpv_split is None:
13045                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13046                                 level=logging.ERROR, noiselevel=-1)
13047                         continue
13048
13049                 cat, pn, ver, rev = cpv_split
13050                 cat, pf = portage.catsplit(cpv)
13051                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13052                 if relative_eb_path in modified_files:
13053                         continue
13054
13055                 try:
13056                         cache_entry = cache_db[cpv]
13057                         eb_mtime = cache_entry.get("_mtime_")
13058                         ec_mtimes = cache_entry.get("_eclasses_")
13059                 except KeyError:
13060                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13061                                 level=logging.ERROR, noiselevel=-1)
13062                         continue
13063                 except CacheError, e:
13064                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13065                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13066                         continue
13067
13068                 if eb_mtime is None:
13069                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13070                                 level=logging.ERROR, noiselevel=-1)
13071                         continue
13072
13073                 try:
13074                         eb_mtime = long(eb_mtime)
13075                 except ValueError:
13076                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13077                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13078                         continue
13079
13080                 if ec_mtimes is None:
13081                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13082                                 level=logging.ERROR, noiselevel=-1)
13083                         continue
13084
13085                 if modified_eclasses.intersection(ec_mtimes):
13086                         continue
13087
13088                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13089                 if missing_eclasses:
13090                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13091                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13092                                 noiselevel=-1)
13093                         continue
13094
13095                 eb_path = os.path.join(portdir, relative_eb_path)
13096                 try:
13097                         current_eb_mtime = os.stat(eb_path)
13098                 except OSError:
13099                         writemsg_level("!!! Missing ebuild: %s\n" % \
13100                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13101                         continue
13102
13103                 inconsistent = False
13104                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13105                         updated_mtime = updated_ec_mtimes.get(ec)
13106                         if updated_mtime is not None and updated_mtime != ec_mtime:
13107                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13108                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13109                                 inconsistent = True
13110                                 break
13111
13112                 if inconsistent:
13113                         continue
13114
13115                 if current_eb_mtime != eb_mtime:
13116                         os.utime(eb_path, (eb_mtime, eb_mtime))
13117
13118                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13119                         if ec in updated_ec_mtimes:
13120                                 continue
13121                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13122                         current_mtime = long(os.stat(ec_path).st_mtime)
13123                         if current_mtime != ec_mtime:
13124                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13125                         updated_ec_mtimes[ec] = ec_mtime
13126
13127         return os.EX_OK
13128
13129 def action_metadata(settings, portdb, myopts):
13130         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13131         old_umask = os.umask(0002)
13132         cachedir = os.path.normpath(settings.depcachedir)
13133         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13134                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13135                                         "/sys", "/tmp", "/usr",  "/var"]:
13136                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13137                         "ROOT DIRECTORY ON YOUR SYSTEM."
13138                 print >> sys.stderr, \
13139                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13140                 sys.exit(73)
13141         if not os.path.exists(cachedir):
13142                 os.mkdir(cachedir)
13143
13144         ec = portage.eclass_cache.cache(portdb.porttree_root)
13145         myportdir = os.path.realpath(settings["PORTDIR"])
13146         cm = settings.load_best_module("portdbapi.metadbmodule")(
13147                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13148
13149         from portage.cache import util
13150
13151         class percentage_noise_maker(util.quiet_mirroring):
13152                 def __init__(self, dbapi):
13153                         self.dbapi = dbapi
13154                         self.cp_all = dbapi.cp_all()
13155                         l = len(self.cp_all)
13156                         self.call_update_min = 100000000
13157                         self.min_cp_all = l/100.0
13158                         self.count = 1
13159                         self.pstr = ''
13160
13161                 def __iter__(self):
13162                         for x in self.cp_all:
13163                                 self.count += 1
13164                                 if self.count > self.min_cp_all:
13165                                         self.call_update_min = 0
13166                                         self.count = 0
13167                                 for y in self.dbapi.cp_list(x):
13168                                         yield y
13169                         self.call_update_mine = 0
13170
13171                 def update(self, *arg):
13172                         try:                            self.pstr = int(self.pstr) + 1
13173                         except ValueError:      self.pstr = 1
13174                         sys.stdout.write("%s%i%%" % \
13175                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13176                         sys.stdout.flush()
13177                         self.call_update_min = 10000000
13178
13179                 def finish(self, *arg):
13180                         sys.stdout.write("\b\b\b\b100%\n")
13181                         sys.stdout.flush()
13182
13183         if "--quiet" in myopts:
13184                 def quicky_cpv_generator(cp_all_list):
13185                         for x in cp_all_list:
13186                                 for y in portdb.cp_list(x):
13187                                         yield y
13188                 source = quicky_cpv_generator(portdb.cp_all())
13189                 noise_maker = portage.cache.util.quiet_mirroring()
13190         else:
13191                 noise_maker = source = percentage_noise_maker(portdb)
13192         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13193                 eclass_cache=ec, verbose_instance=noise_maker)
13194
13195         sys.stdout.flush()
13196         os.umask(old_umask)
13197
13198 def action_regen(settings, portdb, max_jobs, max_load):
13199         xterm_titles = "notitles" not in settings.features
13200         emergelog(xterm_titles, " === regen")
13201         #regenerate cache entries
13202         portage.writemsg_stdout("Regenerating cache entries...\n")
13203         try:
13204                 os.close(sys.stdin.fileno())
13205         except SystemExit, e:
13206                 raise # Needed else can't exit
13207         except:
13208                 pass
13209         sys.stdout.flush()
13210
13211         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13212         regen.run()
13213
13214         portage.writemsg_stdout("done!\n")
13215         return regen.returncode
13216
13217 def action_config(settings, trees, myopts, myfiles):
13218         if len(myfiles) != 1:
13219                 print red("!!! config can only take a single package atom at this time\n")
13220                 sys.exit(1)
13221         if not is_valid_package_atom(myfiles[0]):
13222                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13223                         noiselevel=-1)
13224                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13225                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13226                 sys.exit(1)
13227         print
13228         try:
13229                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13230         except portage.exception.AmbiguousPackageName, e:
13231                 # Multiple matches thrown from cpv_expand
13232                 pkgs = e.args[0]
13233         if len(pkgs) == 0:
13234                 print "No packages found.\n"
13235                 sys.exit(0)
13236         elif len(pkgs) > 1:
13237                 if "--ask" in myopts:
13238                         options = []
13239                         print "Please select a package to configure:"
13240                         idx = 0
13241                         for pkg in pkgs:
13242                                 idx += 1
13243                                 options.append(str(idx))
13244                                 print options[-1]+") "+pkg
13245                         print "X) Cancel"
13246                         options.append("X")
13247                         idx = userquery("Selection?", options)
13248                         if idx == "X":
13249                                 sys.exit(0)
13250                         pkg = pkgs[int(idx)-1]
13251                 else:
13252                         print "The following packages available:"
13253                         for pkg in pkgs:
13254                                 print "* "+pkg
13255                         print "\nPlease use a specific atom or the --ask option."
13256                         sys.exit(1)
13257         else:
13258                 pkg = pkgs[0]
13259
13260         print
13261         if "--ask" in myopts:
13262                 if userquery("Ready to configure "+pkg+"?") == "No":
13263                         sys.exit(0)
13264         else:
13265                 print "Configuring pkg..."
13266         print
13267         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13268         mysettings = portage.config(clone=settings)
13269         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13270         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13271         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13272                 mysettings,
13273                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13274                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13275         if retval == os.EX_OK:
13276                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13277                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13278         print
13279
13280 def action_info(settings, trees, myopts, myfiles):
13281         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13282                 settings.profile_path, settings["CHOST"],
13283                 trees[settings["ROOT"]]["vartree"].dbapi)
13284         header_width = 65
13285         header_title = "System Settings"
13286         if myfiles:
13287                 print header_width * "="
13288                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13289         print header_width * "="
13290         print "System uname: "+platform.platform(aliased=1)
13291
13292         lastSync = portage.grabfile(os.path.join(
13293                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13294         print "Timestamp of tree:",
13295         if lastSync:
13296                 print lastSync[0]
13297         else:
13298                 print "Unknown"
13299
13300         output=commands.getstatusoutput("distcc --version")
13301         if not output[0]:
13302                 print str(output[1].split("\n",1)[0]),
13303                 if "distcc" in settings.features:
13304                         print "[enabled]"
13305                 else:
13306                         print "[disabled]"
13307
13308         output=commands.getstatusoutput("ccache -V")
13309         if not output[0]:
13310                 print str(output[1].split("\n",1)[0]),
13311                 if "ccache" in settings.features:
13312                         print "[enabled]"
13313                 else:
13314                         print "[disabled]"
13315
13316         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13317                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13318         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13319         myvars  = portage.util.unique_array(myvars)
13320         myvars.sort()
13321
13322         for x in myvars:
13323                 if portage.isvalidatom(x):
13324                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13325                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13326                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13327                         pkgs = []
13328                         for pn, ver, rev in pkg_matches:
13329                                 if rev != "r0":
13330                                         pkgs.append(ver + "-" + rev)
13331                                 else:
13332                                         pkgs.append(ver)
13333                         if pkgs:
13334                                 pkgs = ", ".join(pkgs)
13335                                 print "%-20s %s" % (x+":", pkgs)
13336                 else:
13337                         print "%-20s %s" % (x+":", "[NOT VALID]")
13338
13339         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13340
13341         if "--verbose" in myopts:
13342                 myvars=settings.keys()
13343         else:
13344                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13345                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13346                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13347                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13348
13349                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13350
13351         myvars = portage.util.unique_array(myvars)
13352         unset_vars = []
13353         myvars.sort()
13354         for x in myvars:
13355                 if x in settings:
13356                         if x != "USE":
13357                                 print '%s="%s"' % (x, settings[x])
13358                         else:
13359                                 use = set(settings["USE"].split())
13360                                 use_expand = settings["USE_EXPAND"].split()
13361                                 use_expand.sort()
13362                                 for varname in use_expand:
13363                                         flag_prefix = varname.lower() + "_"
13364                                         for f in list(use):
13365                                                 if f.startswith(flag_prefix):
13366                                                         use.remove(f)
13367                                 use = list(use)
13368                                 use.sort()
13369                                 print 'USE="%s"' % " ".join(use),
13370                                 for varname in use_expand:
13371                                         myval = settings.get(varname)
13372                                         if myval:
13373                                                 print '%s="%s"' % (varname, myval),
13374                                 print
13375                 else:
13376                         unset_vars.append(x)
13377         if unset_vars:
13378                 print "Unset:  "+", ".join(unset_vars)
13379         print
13380
13381         if "--debug" in myopts:
13382                 for x in dir(portage):
13383                         module = getattr(portage, x)
13384                         if "cvs_id_string" in dir(module):
13385                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13386
13387         # See if we can find any packages installed matching the strings
13388         # passed on the command line
13389         mypkgs = []
13390         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13391         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13392         for x in myfiles:
13393                 mypkgs.extend(vardb.match(x))
13394
13395         # If some packages were found...
13396         if mypkgs:
13397                 # Get our global settings (we only print stuff if it varies from
13398                 # the current config)
13399                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13400                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13401                 global_vals = {}
13402                 pkgsettings = portage.config(clone=settings)
13403
13404                 for myvar in mydesiredvars:
13405                         global_vals[myvar] = set(settings.get(myvar, "").split())
13406
13407                 # Loop through each package
13408                 # Only print settings if they differ from global settings
13409                 header_title = "Package Settings"
13410                 print header_width * "="
13411                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13412                 print header_width * "="
13413                 from portage.output import EOutput
13414                 out = EOutput()
13415                 for pkg in mypkgs:
13416                         # Get all package specific variables
13417                         auxvalues = vardb.aux_get(pkg, auxkeys)
13418                         valuesmap = {}
13419                         for i in xrange(len(auxkeys)):
13420                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13421                         diff_values = {}
13422                         for myvar in mydesiredvars:
13423                                 # If the package variable doesn't match the
13424                                 # current global variable, something has changed
13425                                 # so set diff_found so we know to print
13426                                 if valuesmap[myvar] != global_vals[myvar]:
13427                                         diff_values[myvar] = valuesmap[myvar]
13428                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13429                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13430                         pkgsettings.reset()
13431                         # If a matching ebuild is no longer available in the tree, maybe it
13432                         # would make sense to compare against the flags for the best
13433                         # available version with the same slot?
13434                         mydb = None
13435                         if portdb.cpv_exists(pkg):
13436                                 mydb = portdb
13437                         pkgsettings.setcpv(pkg, mydb=mydb)
13438                         if valuesmap["IUSE"].intersection(
13439                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13440                                 diff_values["USE"] = valuesmap["USE"]
13441                         # If a difference was found, print the info for
13442                         # this package.
13443                         if diff_values:
13444                                 # Print package info
13445                                 print "%s was built with the following:" % pkg
13446                                 for myvar in mydesiredvars + ["USE"]:
13447                                         if myvar in diff_values:
13448                                                 mylist = list(diff_values[myvar])
13449                                                 mylist.sort()
13450                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13451                                 print
13452                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13453                         ebuildpath = vardb.findname(pkg)
13454                         if not ebuildpath or not os.path.exists(ebuildpath):
13455                                 out.ewarn("No ebuild found for '%s'" % pkg)
13456                                 continue
13457                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13458                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13459                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13460                                 tree="vartree")
13461
13462 def action_search(root_config, myopts, myfiles, spinner):
13463         if not myfiles:
13464                 print "emerge: no search terms provided."
13465         else:
13466                 searchinstance = search(root_config,
13467                         spinner, "--searchdesc" in myopts,
13468                         "--quiet" not in myopts, "--usepkg" in myopts,
13469                         "--usepkgonly" in myopts)
13470                 for mysearch in myfiles:
13471                         try:
13472                                 searchinstance.execute(mysearch)
13473                         except re.error, comment:
13474                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13475                                 sys.exit(1)
13476                         searchinstance.output()
13477
13478 def action_depclean(settings, trees, ldpath_mtimes,
13479         myopts, action, myfiles, spinner):
13480         # Kill packages that aren't explicitly merged or are required as a
13481         # dependency of another package. World file is explicit.
13482
13483         # Global depclean or prune operations are not very safe when there are
13484         # missing dependencies since it's unknown how badly incomplete
13485         # the dependency graph is, and we might accidentally remove packages
13486         # that should have been pulled into the graph. On the other hand, it's
13487         # relatively safe to ignore missing deps when only asked to remove
13488         # specific packages.
13489         allow_missing_deps = len(myfiles) > 0
13490
13491         msg = []
13492         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13493         msg.append("mistakes. Packages that are part of the world set will always\n")
13494         msg.append("be kept.  They can be manually added to this set with\n")
13495         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13496         msg.append("package.provided (see portage(5)) will be removed by\n")
13497         msg.append("depclean, even if they are part of the world set.\n")
13498         msg.append("\n")
13499         msg.append("As a safety measure, depclean will not remove any packages\n")
13500         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13501         msg.append("consequence, it is often necessary to run %s\n" % \
13502                 good("`emerge --update"))
13503         msg.append(good("--newuse --deep @system @world`") + \
13504                 " prior to depclean.\n")
13505
13506         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13507                 portage.writemsg_stdout("\n")
13508                 for x in msg:
13509                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13510
13511         xterm_titles = "notitles" not in settings.features
13512         myroot = settings["ROOT"]
13513         root_config = trees[myroot]["root_config"]
13514         getSetAtoms = root_config.setconfig.getSetAtoms
13515         vardb = trees[myroot]["vartree"].dbapi
13516
13517         required_set_names = ("system", "world")
13518         required_sets = {}
13519         set_args = []
13520
13521         for s in required_set_names:
13522                 required_sets[s] = InternalPackageSet(
13523                         initial_atoms=getSetAtoms(s))
13524
13525         
13526         # When removing packages, use a temporary version of world
13527         # which excludes packages that are intended to be eligible for
13528         # removal.
13529         world_temp_set = required_sets["world"]
13530         system_set = required_sets["system"]
13531
13532         if not system_set or not world_temp_set:
13533
13534                 if not system_set:
13535                         writemsg_level("!!! You have no system list.\n",
13536                                 level=logging.ERROR, noiselevel=-1)
13537
13538                 if not world_temp_set:
13539                         writemsg_level("!!! You have no world file.\n",
13540                                         level=logging.WARNING, noiselevel=-1)
13541
13542                 writemsg_level("!!! Proceeding is likely to " + \
13543                         "break your installation.\n",
13544                         level=logging.WARNING, noiselevel=-1)
13545                 if "--pretend" not in myopts:
13546                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13547
13548         if action == "depclean":
13549                 emergelog(xterm_titles, " >>> depclean")
13550
13551         import textwrap
13552         args_set = InternalPackageSet()
13553         if myfiles:
13554                 for x in myfiles:
13555                         if not is_valid_package_atom(x):
13556                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13557                                         level=logging.ERROR, noiselevel=-1)
13558                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13559                                 return
13560                         try:
13561                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13562                         except portage.exception.AmbiguousPackageName, e:
13563                                 msg = "The short ebuild name \"" + x + \
13564                                         "\" is ambiguous.  Please specify " + \
13565                                         "one of the following " + \
13566                                         "fully-qualified ebuild names instead:"
13567                                 for line in textwrap.wrap(msg, 70):
13568                                         writemsg_level("!!! %s\n" % (line,),
13569                                                 level=logging.ERROR, noiselevel=-1)
13570                                 for i in e[0]:
13571                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13572                                                 level=logging.ERROR, noiselevel=-1)
13573                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13574                                 return
13575                         args_set.add(atom)
13576                 matched_packages = False
13577                 for x in args_set:
13578                         if vardb.match(x):
13579                                 matched_packages = True
13580                                 break
13581                 if not matched_packages:
13582                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13583                                 action)
13584                         return
13585
13586         writemsg_level("\nCalculating dependencies  ")
13587         resolver_params = create_depgraph_params(myopts, "remove")
13588         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13589         vardb = resolver.trees[myroot]["vartree"].dbapi
13590
13591         if action == "depclean":
13592
13593                 if args_set:
13594                         # Pull in everything that's installed but not matched
13595                         # by an argument atom since we don't want to clean any
13596                         # package if something depends on it.
13597
13598                         world_temp_set.clear()
13599                         for pkg in vardb:
13600                                 spinner.update()
13601
13602                                 try:
13603                                         if args_set.findAtomForPackage(pkg) is None:
13604                                                 world_temp_set.add("=" + pkg.cpv)
13605                                                 continue
13606                                 except portage.exception.InvalidDependString, e:
13607                                         show_invalid_depstring_notice(pkg,
13608                                                 pkg.metadata["PROVIDE"], str(e))
13609                                         del e
13610                                         world_temp_set.add("=" + pkg.cpv)
13611                                         continue
13612
13613         elif action == "prune":
13614
13615                 # Pull in everything that's installed since we don't
13616                 # to prune a package if something depends on it.
13617                 world_temp_set.clear()
13618                 world_temp_set.update(vardb.cp_all())
13619
13620                 if not args_set:
13621
13622                         # Try to prune everything that's slotted.
13623                         for cp in vardb.cp_all():
13624                                 if len(vardb.cp_list(cp)) > 1:
13625                                         args_set.add(cp)
13626
13627                 # Remove atoms from world that match installed packages
13628                 # that are also matched by argument atoms, but do not remove
13629                 # them if they match the highest installed version.
13630                 for pkg in vardb:
13631                         spinner.update()
13632                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13633                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13634                                 raise AssertionError("package expected in matches: " + \
13635                                         "cp = %s, cpv = %s matches = %s" % \
13636                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13637
13638                         highest_version = pkgs_for_cp[-1]
13639                         if pkg == highest_version:
13640                                 # pkg is the highest version
13641                                 world_temp_set.add("=" + pkg.cpv)
13642                                 continue
13643
13644                         if len(pkgs_for_cp) <= 1:
13645                                 raise AssertionError("more packages expected: " + \
13646                                         "cp = %s, cpv = %s matches = %s" % \
13647                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13648
13649                         try:
13650                                 if args_set.findAtomForPackage(pkg) is None:
13651                                         world_temp_set.add("=" + pkg.cpv)
13652                                         continue
13653                         except portage.exception.InvalidDependString, e:
13654                                 show_invalid_depstring_notice(pkg,
13655                                         pkg.metadata["PROVIDE"], str(e))
13656                                 del e
13657                                 world_temp_set.add("=" + pkg.cpv)
13658                                 continue
13659
13660         set_args = {}
13661         for s, package_set in required_sets.iteritems():
13662                 set_atom = SETPREFIX + s
13663                 set_arg = SetArg(arg=set_atom, set=package_set,
13664                         root_config=resolver.roots[myroot])
13665                 set_args[s] = set_arg
13666                 for atom in set_arg.set:
13667                         resolver._dep_stack.append(
13668                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13669                         resolver.digraph.add(set_arg, None)
13670
13671         success = resolver._complete_graph()
13672         writemsg_level("\b\b... done!\n")
13673
13674         resolver.display_problems()
13675
13676         if not success:
13677                 return 1
13678
13679         def unresolved_deps():
13680
13681                 unresolvable = set()
13682                 for dep in resolver._initially_unsatisfied_deps:
13683                         if isinstance(dep.parent, Package) and \
13684                                 (dep.priority > UnmergeDepPriority.SOFT):
13685                                 unresolvable.add((dep.atom, dep.parent.cpv))
13686
13687                 if not unresolvable:
13688                         return False
13689
13690                 if unresolvable and not allow_missing_deps:
13691                         prefix = bad(" * ")
13692                         msg = []
13693                         msg.append("Dependencies could not be completely resolved due to")
13694                         msg.append("the following required packages not being installed:")
13695                         msg.append("")
13696                         for atom, parent in unresolvable:
13697                                 msg.append("  %s pulled in by:" % (atom,))
13698                                 msg.append("    %s" % (parent,))
13699                                 msg.append("")
13700                         msg.append("Have you forgotten to run " + \
13701                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13702                         msg.append(("to %s? It may be necessary to manually " + \
13703                                 "uninstall packages that no longer") % action)
13704                         msg.append("exist in the portage tree since " + \
13705                                 "it may not be possible to satisfy their")
13706                         msg.append("dependencies.  Also, be aware of " + \
13707                                 "the --with-bdeps option that is documented")
13708                         msg.append("in " + good("`man emerge`") + ".")
13709                         if action == "prune":
13710                                 msg.append("")
13711                                 msg.append("If you would like to ignore " + \
13712                                         "dependencies then use %s." % good("--nodeps"))
13713                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13714                                 level=logging.ERROR, noiselevel=-1)
13715                         return True
13716                 return False
13717
13718         if unresolved_deps():
13719                 return 1
13720
13721         graph = resolver.digraph.copy()
13722         required_pkgs_total = 0
13723         for node in graph:
13724                 if isinstance(node, Package):
13725                         required_pkgs_total += 1
13726
13727         def show_parents(child_node):
13728                 parent_nodes = graph.parent_nodes(child_node)
13729                 if not parent_nodes:
13730                         # With --prune, the highest version can be pulled in without any
13731                         # real parent since all installed packages are pulled in.  In that
13732                         # case there's nothing to show here.
13733                         return
13734                 parent_strs = []
13735                 for node in parent_nodes:
13736                         parent_strs.append(str(getattr(node, "cpv", node)))
13737                 parent_strs.sort()
13738                 msg = []
13739                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13740                 for parent_str in parent_strs:
13741                         msg.append("    %s\n" % (parent_str,))
13742                 msg.append("\n")
13743                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13744
13745         def cmp_pkg_cpv(pkg1, pkg2):
13746                 """Sort Package instances by cpv."""
13747                 if pkg1.cpv > pkg2.cpv:
13748                         return 1
13749                 elif pkg1.cpv == pkg2.cpv:
13750                         return 0
13751                 else:
13752                         return -1
13753
13754         def create_cleanlist():
13755                 pkgs_to_remove = []
13756
13757                 if action == "depclean":
13758                         if args_set:
13759
13760                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13761                                         arg_atom = None
13762                                         try:
13763                                                 arg_atom = args_set.findAtomForPackage(pkg)
13764                                         except portage.exception.InvalidDependString:
13765                                                 # this error has already been displayed by now
13766                                                 continue
13767
13768                                         if arg_atom:
13769                                                 if pkg not in graph:
13770                                                         pkgs_to_remove.append(pkg)
13771                                                 elif "--verbose" in myopts:
13772                                                         show_parents(pkg)
13773
13774                         else:
13775                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13776                                         if pkg not in graph:
13777                                                 pkgs_to_remove.append(pkg)
13778                                         elif "--verbose" in myopts:
13779                                                 show_parents(pkg)
13780
13781                 elif action == "prune":
13782                         # Prune really uses all installed instead of world. It's not
13783                         # a real reverse dependency so don't display it as such.
13784                         graph.remove(set_args["world"])
13785
13786                         for atom in args_set:
13787                                 for pkg in vardb.match_pkgs(atom):
13788                                         if pkg not in graph:
13789                                                 pkgs_to_remove.append(pkg)
13790                                         elif "--verbose" in myopts:
13791                                                 show_parents(pkg)
13792
13793                 if not pkgs_to_remove:
13794                         writemsg_level(
13795                                 ">>> No packages selected for removal by %s\n" % action)
13796                         if "--verbose" not in myopts:
13797                                 writemsg_level(
13798                                         ">>> To see reverse dependencies, use %s\n" % \
13799                                                 good("--verbose"))
13800                         if action == "prune":
13801                                 writemsg_level(
13802                                         ">>> To ignore dependencies, use %s\n" % \
13803                                                 good("--nodeps"))
13804
13805                 return pkgs_to_remove
13806
13807         cleanlist = create_cleanlist()
13808
13809         if len(cleanlist):
13810                 clean_set = set(cleanlist)
13811
13812                 # Check if any of these package are the sole providers of libraries
13813                 # with consumers that have not been selected for removal. If so, these
13814                 # packages and any dependencies need to be added to the graph.
13815                 real_vardb = trees[myroot]["vartree"].dbapi
13816                 linkmap = real_vardb.linkmap
13817                 liblist = linkmap.listLibraryObjects()
13818                 consumer_cache = {}
13819                 provider_cache = {}
13820                 soname_cache = {}
13821                 consumer_map = {}
13822
13823                 writemsg_level(">>> Checking for lib consumers...\n")
13824
13825                 for pkg in cleanlist:
13826                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13827                         provided_libs = set()
13828
13829                         for lib in liblist:
13830                                 if pkg_dblink.isowner(lib, myroot):
13831                                         provided_libs.add(lib)
13832
13833                         if not provided_libs:
13834                                 continue
13835
13836                         consumers = {}
13837                         for lib in provided_libs:
13838                                 lib_consumers = consumer_cache.get(lib)
13839                                 if lib_consumers is None:
13840                                         lib_consumers = linkmap.findConsumers(lib)
13841                                         consumer_cache[lib] = lib_consumers
13842                                 if lib_consumers:
13843                                         consumers[lib] = lib_consumers
13844
13845                         if not consumers:
13846                                 continue
13847
13848                         for lib, lib_consumers in consumers.items():
13849                                 for consumer_file in list(lib_consumers):
13850                                         if pkg_dblink.isowner(consumer_file, myroot):
13851                                                 lib_consumers.remove(consumer_file)
13852                                 if not lib_consumers:
13853                                         del consumers[lib]
13854
13855                         if not consumers:
13856                                 continue
13857
13858                         for lib, lib_consumers in consumers.iteritems():
13859
13860                                 soname = soname_cache.get(lib)
13861                                 if soname is None:
13862                                         soname = linkmap.getSoname(lib)
13863                                         soname_cache[lib] = soname
13864
13865                                 consumer_providers = []
13866                                 for lib_consumer in lib_consumers:
13867                                         providers = provider_cache.get(lib)
13868                                         if providers is None:
13869                                                 providers = linkmap.findProviders(lib_consumer)
13870                                                 provider_cache[lib_consumer] = providers
13871                                         if soname not in providers:
13872                                                 # Why does this happen?
13873                                                 continue
13874                                         consumer_providers.append(
13875                                                 (lib_consumer, providers[soname]))
13876
13877                                 consumers[lib] = consumer_providers
13878
13879                         consumer_map[pkg] = consumers
13880
13881                 if consumer_map:
13882
13883                         search_files = set()
13884                         for consumers in consumer_map.itervalues():
13885                                 for lib, consumer_providers in consumers.iteritems():
13886                                         for lib_consumer, providers in consumer_providers:
13887                                                 search_files.add(lib_consumer)
13888                                                 search_files.update(providers)
13889
13890                         writemsg_level(">>> Assigning files to packages...\n")
13891                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13892
13893                         for pkg, consumers in consumer_map.items():
13894                                 for lib, consumer_providers in consumers.items():
13895                                         lib_consumers = set()
13896
13897                                         for lib_consumer, providers in consumer_providers:
13898                                                 owner_set = file_owners.get(lib_consumer)
13899                                                 provider_dblinks = set()
13900                                                 provider_pkgs = set()
13901
13902                                                 if len(providers) > 1:
13903                                                         for provider in providers:
13904                                                                 provider_set = file_owners.get(provider)
13905                                                                 if provider_set is not None:
13906                                                                         provider_dblinks.update(provider_set)
13907
13908                                                 if len(provider_dblinks) > 1:
13909                                                         for provider_dblink in provider_dblinks:
13910                                                                 pkg_key = ("installed", myroot,
13911                                                                         provider_dblink.mycpv, "nomerge")
13912                                                                 if pkg_key not in clean_set:
13913                                                                         provider_pkgs.add(vardb.get(pkg_key))
13914
13915                                                 if provider_pkgs:
13916                                                         continue
13917
13918                                                 if owner_set is not None:
13919                                                         lib_consumers.update(owner_set)
13920
13921                                         for consumer_dblink in list(lib_consumers):
13922                                                 if ("installed", myroot, consumer_dblink.mycpv,
13923                                                         "nomerge") in clean_set:
13924                                                         lib_consumers.remove(consumer_dblink)
13925                                                         continue
13926
13927                                         if lib_consumers:
13928                                                 consumers[lib] = lib_consumers
13929                                         else:
13930                                                 del consumers[lib]
13931                                 if not consumers:
13932                                         del consumer_map[pkg]
13933
13934                 if consumer_map:
13935                         # TODO: Implement a package set for rebuilding consumer packages.
13936
13937                         msg = "In order to avoid breakage of link level " + \
13938                                 "dependencies, one or more packages will not be removed. " + \
13939                                 "This can be solved by rebuilding " + \
13940                                 "the packages that pulled them in."
13941
13942                         prefix = bad(" * ")
13943                         from textwrap import wrap
13944                         writemsg_level("".join(prefix + "%s\n" % line for \
13945                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13946
13947                         msg = []
13948                         for pkg, consumers in consumer_map.iteritems():
13949                                 unique_consumers = set(chain(*consumers.values()))
13950                                 unique_consumers = sorted(consumer.mycpv \
13951                                         for consumer in unique_consumers)
13952                                 msg.append("")
13953                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13954                                 for consumer in unique_consumers:
13955                                         msg.append("    %s" % (consumer,))
13956                         msg.append("")
13957                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13958                                 level=logging.WARNING, noiselevel=-1)
13959
13960                         # Add lib providers to the graph as children of lib consumers,
13961                         # and also add any dependencies pulled in by the provider.
13962                         writemsg_level(">>> Adding lib providers to graph...\n")
13963
13964                         for pkg, consumers in consumer_map.iteritems():
13965                                 for consumer_dblink in set(chain(*consumers.values())):
13966                                         consumer_pkg = vardb.get(("installed", myroot,
13967                                                 consumer_dblink.mycpv, "nomerge"))
13968                                         if not resolver._add_pkg(pkg,
13969                                                 Dependency(parent=consumer_pkg,
13970                                                 priority=UnmergeDepPriority(runtime=True),
13971                                                 root=pkg.root)):
13972                                                 resolver.display_problems()
13973                                                 return 1
13974
13975                         writemsg_level("\nCalculating dependencies  ")
13976                         success = resolver._complete_graph()
13977                         writemsg_level("\b\b... done!\n")
13978                         resolver.display_problems()
13979                         if not success:
13980                                 return 1
13981                         if unresolved_deps():
13982                                 return 1
13983
13984                         graph = resolver.digraph.copy()
13985                         required_pkgs_total = 0
13986                         for node in graph:
13987                                 if isinstance(node, Package):
13988                                         required_pkgs_total += 1
13989                         cleanlist = create_cleanlist()
13990                         if not cleanlist:
13991                                 return 0
13992                         clean_set = set(cleanlist)
13993
13994                 # Use a topological sort to create an unmerge order such that
13995                 # each package is unmerged before it's dependencies. This is
13996                 # necessary to avoid breaking things that may need to run
13997                 # during pkg_prerm or pkg_postrm phases.
13998
13999                 # Create a new graph to account for dependencies between the
14000                 # packages being unmerged.
14001                 graph = digraph()
14002                 del cleanlist[:]
14003
14004                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14005                 runtime = UnmergeDepPriority(runtime=True)
14006                 runtime_post = UnmergeDepPriority(runtime_post=True)
14007                 buildtime = UnmergeDepPriority(buildtime=True)
14008                 priority_map = {
14009                         "RDEPEND": runtime,
14010                         "PDEPEND": runtime_post,
14011                         "DEPEND": buildtime,
14012                 }
14013
14014                 for node in clean_set:
14015                         graph.add(node, None)
14016                         mydeps = []
14017                         node_use = node.metadata["USE"].split()
14018                         for dep_type in dep_keys:
14019                                 depstr = node.metadata[dep_type]
14020                                 if not depstr:
14021                                         continue
14022                                 try:
14023                                         portage.dep._dep_check_strict = False
14024                                         success, atoms = portage.dep_check(depstr, None, settings,
14025                                                 myuse=node_use, trees=resolver._graph_trees,
14026                                                 myroot=myroot)
14027                                 finally:
14028                                         portage.dep._dep_check_strict = True
14029                                 if not success:
14030                                         # Ignore invalid deps of packages that will
14031                                         # be uninstalled anyway.
14032                                         continue
14033
14034                                 priority = priority_map[dep_type]
14035                                 for atom in atoms:
14036                                         if not isinstance(atom, portage.dep.Atom):
14037                                                 # Ignore invalid atoms returned from dep_check().
14038                                                 continue
14039                                         if atom.blocker:
14040                                                 continue
14041                                         matches = vardb.match_pkgs(atom)
14042                                         if not matches:
14043                                                 continue
14044                                         for child_node in matches:
14045                                                 if child_node in clean_set:
14046                                                         graph.add(child_node, node, priority=priority)
14047
14048                 ordered = True
14049                 if len(graph.order) == len(graph.root_nodes()):
14050                         # If there are no dependencies between packages
14051                         # let unmerge() group them by cat/pn.
14052                         ordered = False
14053                         cleanlist = [pkg.cpv for pkg in graph.order]
14054                 else:
14055                         # Order nodes from lowest to highest overall reference count for
14056                         # optimal root node selection.
14057                         node_refcounts = {}
14058                         for node in graph.order:
14059                                 node_refcounts[node] = len(graph.parent_nodes(node))
14060                         def cmp_reference_count(node1, node2):
14061                                 return node_refcounts[node1] - node_refcounts[node2]
14062                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14063         
14064                         ignore_priority_range = [None]
14065                         ignore_priority_range.extend(
14066                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14067                         while not graph.empty():
14068                                 for ignore_priority in ignore_priority_range:
14069                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14070                                         if nodes:
14071                                                 break
14072                                 if not nodes:
14073                                         raise AssertionError("no root nodes")
14074                                 if ignore_priority is not None:
14075                                         # Some deps have been dropped due to circular dependencies,
14076                                         # so only pop one node in order do minimize the number that
14077                                         # are dropped.
14078                                         del nodes[1:]
14079                                 for node in nodes:
14080                                         graph.remove(node)
14081                                         cleanlist.append(node.cpv)
14082
14083                 unmerge(root_config, myopts, "unmerge", cleanlist,
14084                         ldpath_mtimes, ordered=ordered)
14085
14086         if action == "prune":
14087                 return
14088
14089         if not cleanlist and "--quiet" in myopts:
14090                 return
14091
14092         print "Packages installed:   "+str(len(vardb.cpv_all()))
14093         print "Packages in world:    " + \
14094                 str(len(root_config.sets["world"].getAtoms()))
14095         print "Packages in system:   " + \
14096                 str(len(root_config.sets["system"].getAtoms()))
14097         print "Required packages:    "+str(required_pkgs_total)
14098         if "--pretend" in myopts:
14099                 print "Number to remove:     "+str(len(cleanlist))
14100         else:
14101                 print "Number removed:       "+str(len(cleanlist))
14102
14103 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14104         """
14105         Construct a depgraph for the given resume list. This will raise
14106         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14107         @rtype: tuple
14108         @returns: (success, depgraph, dropped_tasks)
14109         """
14110         skip_masked = True
14111         skip_unsatisfied = True
14112         mergelist = mtimedb["resume"]["mergelist"]
14113         dropped_tasks = set()
14114         while True:
14115                 mydepgraph = depgraph(settings, trees,
14116                         myopts, myparams, spinner)
14117                 try:
14118                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14119                                 skip_masked=skip_masked)
14120                 except depgraph.UnsatisfiedResumeDep, e:
14121                         if not skip_unsatisfied:
14122                                 raise
14123
14124                         graph = mydepgraph.digraph
14125                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14126                                 for dep in e.value)
14127                         traversed_nodes = set()
14128                         unsatisfied_stack = list(unsatisfied_parents)
14129                         while unsatisfied_stack:
14130                                 pkg = unsatisfied_stack.pop()
14131                                 if pkg in traversed_nodes:
14132                                         continue
14133                                 traversed_nodes.add(pkg)
14134
14135                                 # If this package was pulled in by a parent
14136                                 # package scheduled for merge, removing this
14137                                 # package may cause the the parent package's
14138                                 # dependency to become unsatisfied.
14139                                 for parent_node in graph.parent_nodes(pkg):
14140                                         if not isinstance(parent_node, Package) \
14141                                                 or parent_node.operation not in ("merge", "nomerge"):
14142                                                 continue
14143                                         unsatisfied = \
14144                                                 graph.child_nodes(parent_node,
14145                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14146                                         if pkg in unsatisfied:
14147                                                 unsatisfied_parents[parent_node] = parent_node
14148                                                 unsatisfied_stack.append(parent_node)
14149
14150                         pruned_mergelist = []
14151                         for x in mergelist:
14152                                 if isinstance(x, list) and \
14153                                         tuple(x) not in unsatisfied_parents:
14154                                         pruned_mergelist.append(x)
14155
14156                         # If the mergelist doesn't shrink then this loop is infinite.
14157                         if len(pruned_mergelist) == len(mergelist):
14158                                 # This happens if a package can't be dropped because
14159                                 # it's already installed, but it has unsatisfied PDEPEND.
14160                                 raise
14161                         mergelist[:] = pruned_mergelist
14162
14163                         # Exclude installed packages that have been removed from the graph due
14164                         # to failure to build/install runtime dependencies after the dependent
14165                         # package has already been installed.
14166                         dropped_tasks.update(pkg for pkg in \
14167                                 unsatisfied_parents if pkg.operation != "nomerge")
14168                         mydepgraph.break_refs(unsatisfied_parents)
14169
14170                         del e, graph, traversed_nodes, \
14171                                 unsatisfied_parents, unsatisfied_stack
14172                         continue
14173                 else:
14174                         break
14175         return (success, mydepgraph, dropped_tasks)
14176
14177 def action_build(settings, trees, mtimedb,
14178         myopts, myaction, myfiles, spinner):
14179
14180         # validate the state of the resume data
14181         # so that we can make assumptions later.
14182         for k in ("resume", "resume_backup"):
14183                 if k not in mtimedb:
14184                         continue
14185                 resume_data = mtimedb[k]
14186                 if not isinstance(resume_data, dict):
14187                         del mtimedb[k]
14188                         continue
14189                 mergelist = resume_data.get("mergelist")
14190                 if not isinstance(mergelist, list):
14191                         del mtimedb[k]
14192                         continue
14193                 for x in mergelist:
14194                         if not (isinstance(x, list) and len(x) == 4):
14195                                 continue
14196                         pkg_type, pkg_root, pkg_key, pkg_action = x
14197                         if pkg_root not in trees:
14198                                 # Current $ROOT setting differs,
14199                                 # so the list must be stale.
14200                                 mergelist = None
14201                                 break
14202                 if not mergelist:
14203                         del mtimedb[k]
14204                         continue
14205                 resume_opts = resume_data.get("myopts")
14206                 if not isinstance(resume_opts, (dict, list)):
14207                         del mtimedb[k]
14208                         continue
14209                 favorites = resume_data.get("favorites")
14210                 if not isinstance(favorites, list):
14211                         del mtimedb[k]
14212                         continue
14213
14214         resume = False
14215         if "--resume" in myopts and \
14216                 ("resume" in mtimedb or
14217                 "resume_backup" in mtimedb):
14218                 resume = True
14219                 if "resume" not in mtimedb:
14220                         mtimedb["resume"] = mtimedb["resume_backup"]
14221                         del mtimedb["resume_backup"]
14222                         mtimedb.commit()
14223                 # "myopts" is a list for backward compatibility.
14224                 resume_opts = mtimedb["resume"].get("myopts", [])
14225                 if isinstance(resume_opts, list):
14226                         resume_opts = dict((k,True) for k in resume_opts)
14227                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14228                         resume_opts.pop(opt, None)
14229                 myopts.update(resume_opts)
14230
14231                 if "--debug" in myopts:
14232                         writemsg_level("myopts %s\n" % (myopts,))
14233
14234                 # Adjust config according to options of the command being resumed.
14235                 for myroot in trees:
14236                         mysettings =  trees[myroot]["vartree"].settings
14237                         mysettings.unlock()
14238                         adjust_config(myopts, mysettings)
14239                         mysettings.lock()
14240                         del myroot, mysettings
14241
14242         ldpath_mtimes = mtimedb["ldpath"]
14243         favorites=[]
14244         merge_count = 0
14245         buildpkgonly = "--buildpkgonly" in myopts
14246         pretend = "--pretend" in myopts
14247         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14248         ask = "--ask" in myopts
14249         nodeps = "--nodeps" in myopts
14250         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14251         tree = "--tree" in myopts
14252         if nodeps and tree:
14253                 tree = False
14254                 del myopts["--tree"]
14255                 portage.writemsg(colorize("WARN", " * ") + \
14256                         "--tree is broken with --nodeps. Disabling...\n")
14257         debug = "--debug" in myopts
14258         verbose = "--verbose" in myopts
14259         quiet = "--quiet" in myopts
14260         if pretend or fetchonly:
14261                 # make the mtimedb readonly
14262                 mtimedb.filename = None
14263         if "--digest" in myopts:
14264                 msg = "The --digest option can prevent corruption from being" + \
14265                         " noticed. The `repoman manifest` command is the preferred" + \
14266                         " way to generate manifests and it is capable of doing an" + \
14267                         " entire repository or category at once."
14268                 prefix = bad(" * ")
14269                 writemsg(prefix + "\n")
14270                 from textwrap import wrap
14271                 for line in wrap(msg, 72):
14272                         writemsg("%s%s\n" % (prefix, line))
14273                 writemsg(prefix + "\n")
14274
14275         if "--quiet" not in myopts and \
14276                 ("--pretend" in myopts or "--ask" in myopts or \
14277                 "--tree" in myopts or "--verbose" in myopts):
14278                 action = ""
14279                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14280                         action = "fetched"
14281                 elif "--buildpkgonly" in myopts:
14282                         action = "built"
14283                 else:
14284                         action = "merged"
14285                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14286                         print
14287                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14288                         print
14289                 else:
14290                         print
14291                         print darkgreen("These are the packages that would be %s, in order:") % action
14292                         print
14293
14294         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14295         if not show_spinner:
14296                 spinner.update = spinner.update_quiet
14297
14298         if resume:
14299                 favorites = mtimedb["resume"].get("favorites")
14300                 if not isinstance(favorites, list):
14301                         favorites = []
14302
14303                 if show_spinner:
14304                         print "Calculating dependencies  ",
14305                 myparams = create_depgraph_params(myopts, myaction)
14306
14307                 resume_data = mtimedb["resume"]
14308                 mergelist = resume_data["mergelist"]
14309                 if mergelist and "--skipfirst" in myopts:
14310                         for i, task in enumerate(mergelist):
14311                                 if isinstance(task, list) and \
14312                                         task and task[-1] == "merge":
14313                                         del mergelist[i]
14314                                         break
14315
14316                 success = False
14317                 mydepgraph = None
14318                 try:
14319                         success, mydepgraph, dropped_tasks = resume_depgraph(
14320                                 settings, trees, mtimedb, myopts, myparams, spinner)
14321                 except (portage.exception.PackageNotFound,
14322                         depgraph.UnsatisfiedResumeDep), e:
14323                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14324                                 mydepgraph = e.depgraph
14325                         if show_spinner:
14326                                 print
14327                         from textwrap import wrap
14328                         from portage.output import EOutput
14329                         out = EOutput()
14330
14331                         resume_data = mtimedb["resume"]
14332                         mergelist = resume_data.get("mergelist")
14333                         if not isinstance(mergelist, list):
14334                                 mergelist = []
14335                         if mergelist and debug or (verbose and not quiet):
14336                                 out.eerror("Invalid resume list:")
14337                                 out.eerror("")
14338                                 indent = "  "
14339                                 for task in mergelist:
14340                                         if isinstance(task, list):
14341                                                 out.eerror(indent + str(tuple(task)))
14342                                 out.eerror("")
14343
14344                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14345                                 out.eerror("One or more packages are either masked or " + \
14346                                         "have missing dependencies:")
14347                                 out.eerror("")
14348                                 indent = "  "
14349                                 for dep in e.value:
14350                                         if dep.atom is None:
14351                                                 out.eerror(indent + "Masked package:")
14352                                                 out.eerror(2 * indent + str(dep.parent))
14353                                                 out.eerror("")
14354                                         else:
14355                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14356                                                 out.eerror(2 * indent + str(dep.parent))
14357                                                 out.eerror("")
14358                                 msg = "The resume list contains packages " + \
14359                                         "that are either masked or have " + \
14360                                         "unsatisfied dependencies. " + \
14361                                         "Please restart/continue " + \
14362                                         "the operation manually, or use --skipfirst " + \
14363                                         "to skip the first package in the list and " + \
14364                                         "any other packages that may be " + \
14365                                         "masked or have missing dependencies."
14366                                 for line in wrap(msg, 72):
14367                                         out.eerror(line)
14368                         elif isinstance(e, portage.exception.PackageNotFound):
14369                                 out.eerror("An expected package is " + \
14370                                         "not available: %s" % str(e))
14371                                 out.eerror("")
14372                                 msg = "The resume list contains one or more " + \
14373                                         "packages that are no longer " + \
14374                                         "available. Please restart/continue " + \
14375                                         "the operation manually."
14376                                 for line in wrap(msg, 72):
14377                                         out.eerror(line)
14378                 else:
14379                         if show_spinner:
14380                                 print "\b\b... done!"
14381
14382                 if success:
14383                         if dropped_tasks:
14384                                 portage.writemsg("!!! One or more packages have been " + \
14385                                         "dropped due to\n" + \
14386                                         "!!! masking or unsatisfied dependencies:\n\n",
14387                                         noiselevel=-1)
14388                                 for task in dropped_tasks:
14389                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14390                                 portage.writemsg("\n", noiselevel=-1)
14391                         del dropped_tasks
14392                 else:
14393                         if mydepgraph is not None:
14394                                 mydepgraph.display_problems()
14395                         if not (ask or pretend):
14396                                 # delete the current list and also the backup
14397                                 # since it's probably stale too.
14398                                 for k in ("resume", "resume_backup"):
14399                                         mtimedb.pop(k, None)
14400                                 mtimedb.commit()
14401
14402                         return 1
14403         else:
14404                 if ("--resume" in myopts):
14405                         print darkgreen("emerge: It seems we have nothing to resume...")
14406                         return os.EX_OK
14407
14408                 myparams = create_depgraph_params(myopts, myaction)
14409                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14410                         print "Calculating dependencies  ",
14411                         sys.stdout.flush()
14412                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14413                 try:
14414                         retval, favorites = mydepgraph.select_files(myfiles)
14415                 except portage.exception.PackageNotFound, e:
14416                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14417                         return 1
14418                 except portage.exception.PackageSetNotFound, e:
14419                         root_config = trees[settings["ROOT"]]["root_config"]
14420                         display_missing_pkg_set(root_config, e.value)
14421                         return 1
14422                 if show_spinner:
14423                         print "\b\b... done!"
14424                 if not retval:
14425                         mydepgraph.display_problems()
14426                         return 1
14427
14428         if "--pretend" not in myopts and \
14429                 ("--ask" in myopts or "--tree" in myopts or \
14430                 "--verbose" in myopts) and \
14431                 not ("--quiet" in myopts and "--ask" not in myopts):
14432                 if "--resume" in myopts:
14433                         mymergelist = mydepgraph.altlist()
14434                         if len(mymergelist) == 0:
14435                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14436                                 return os.EX_OK
14437                         favorites = mtimedb["resume"]["favorites"]
14438                         retval = mydepgraph.display(
14439                                 mydepgraph.altlist(reversed=tree),
14440                                 favorites=favorites)
14441                         mydepgraph.display_problems()
14442                         if retval != os.EX_OK:
14443                                 return retval
14444                         prompt="Would you like to resume merging these packages?"
14445                 else:
14446                         retval = mydepgraph.display(
14447                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14448                                 favorites=favorites)
14449                         mydepgraph.display_problems()
14450                         if retval != os.EX_OK:
14451                                 return retval
14452                         mergecount=0
14453                         for x in mydepgraph.altlist():
14454                                 if isinstance(x, Package) and x.operation == "merge":
14455                                         mergecount += 1
14456
14457                         if mergecount==0:
14458                                 sets = trees[settings["ROOT"]]["root_config"].sets
14459                                 world_candidates = None
14460                                 if "--noreplace" in myopts and \
14461                                         not oneshot and favorites:
14462                                         # Sets that are not world candidates are filtered
14463                                         # out here since the favorites list needs to be
14464                                         # complete for depgraph.loadResumeCommand() to
14465                                         # operate correctly.
14466                                         world_candidates = [x for x in favorites \
14467                                                 if not (x.startswith(SETPREFIX) and \
14468                                                 not sets[x[1:]].world_candidate)]
14469                                 if "--noreplace" in myopts and \
14470                                         not oneshot and world_candidates:
14471                                         print
14472                                         for x in world_candidates:
14473                                                 print " %s %s" % (good("*"), x)
14474                                         prompt="Would you like to add these packages to your world favorites?"
14475                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14476                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14477                                 else:
14478                                         print
14479                                         print "Nothing to merge; quitting."
14480                                         print
14481                                         return os.EX_OK
14482                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14483                                 prompt="Would you like to fetch the source files for these packages?"
14484                         else:
14485                                 prompt="Would you like to merge these packages?"
14486                 print
14487                 if "--ask" in myopts and userquery(prompt) == "No":
14488                         print
14489                         print "Quitting."
14490                         print
14491                         return os.EX_OK
14492                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14493                 myopts.pop("--ask", None)
14494
14495         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14496                 if ("--resume" in myopts):
14497                         mymergelist = mydepgraph.altlist()
14498                         if len(mymergelist) == 0:
14499                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14500                                 return os.EX_OK
14501                         favorites = mtimedb["resume"]["favorites"]
14502                         retval = mydepgraph.display(
14503                                 mydepgraph.altlist(reversed=tree),
14504                                 favorites=favorites)
14505                         mydepgraph.display_problems()
14506                         if retval != os.EX_OK:
14507                                 return retval
14508                 else:
14509                         retval = mydepgraph.display(
14510                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14511                                 favorites=favorites)
14512                         mydepgraph.display_problems()
14513                         if retval != os.EX_OK:
14514                                 return retval
14515                         if "--buildpkgonly" in myopts:
14516                                 graph_copy = mydepgraph.digraph.clone()
14517                                 removed_nodes = set()
14518                                 for node in graph_copy:
14519                                         if not isinstance(node, Package) or \
14520                                                 node.operation == "nomerge":
14521                                                 removed_nodes.add(node)
14522                                 graph_copy.difference_update(removed_nodes)
14523                                 if not graph_copy.hasallzeros(ignore_priority = \
14524                                         DepPrioritySatisfiedRange.ignore_medium):
14525                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14526                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14527                                         return 1
14528         else:
14529                 if "--buildpkgonly" in myopts:
14530                         graph_copy = mydepgraph.digraph.clone()
14531                         removed_nodes = set()
14532                         for node in graph_copy:
14533                                 if not isinstance(node, Package) or \
14534                                         node.operation == "nomerge":
14535                                         removed_nodes.add(node)
14536                         graph_copy.difference_update(removed_nodes)
14537                         if not graph_copy.hasallzeros(ignore_priority = \
14538                                 DepPrioritySatisfiedRange.ignore_medium):
14539                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14540                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14541                                 return 1
14542
14543                 if ("--resume" in myopts):
14544                         favorites=mtimedb["resume"]["favorites"]
14545                         mymergelist = mydepgraph.altlist()
14546                         mydepgraph.break_refs(mymergelist)
14547                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14548                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14549                         del mydepgraph, mymergelist
14550                         clear_caches(trees)
14551
14552                         retval = mergetask.merge()
14553                         merge_count = mergetask.curval
14554                 else:
14555                         if "resume" in mtimedb and \
14556                         "mergelist" in mtimedb["resume"] and \
14557                         len(mtimedb["resume"]["mergelist"]) > 1:
14558                                 mtimedb["resume_backup"] = mtimedb["resume"]
14559                                 del mtimedb["resume"]
14560                                 mtimedb.commit()
14561                         mtimedb["resume"]={}
14562                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14563                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14564                         # a list type for options.
14565                         mtimedb["resume"]["myopts"] = myopts.copy()
14566
14567                         # Convert Atom instances to plain str.
14568                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14569
14570                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14571                                 for pkgline in mydepgraph.altlist():
14572                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14573                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14574                                                 tmpsettings = portage.config(clone=settings)
14575                                                 edebug = 0
14576                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
14577                                                         edebug = 1
14578                                                 retval = portage.doebuild(
14579                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
14580                                                         ("--pretend" in myopts),
14581                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14582                                                         tree="porttree")
14583
14584                         pkglist = mydepgraph.altlist()
14585                         mydepgraph.saveNomergeFavorites()
14586                         mydepgraph.break_refs(pkglist)
14587                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14588                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14589                         del mydepgraph, pkglist
14590                         clear_caches(trees)
14591
14592                         retval = mergetask.merge()
14593                         merge_count = mergetask.curval
14594
14595                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14596                         if "yes" == settings.get("AUTOCLEAN"):
14597                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14598                                 unmerge(trees[settings["ROOT"]]["root_config"],
14599                                         myopts, "clean", [],
14600                                         ldpath_mtimes, autoclean=1)
14601                         else:
14602                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14603                                         + " AUTOCLEAN is disabled.  This can cause serious"
14604                                         + " problems due to overlapping packages.\n")
14605                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14606
14607                 return retval
14608
14609 def multiple_actions(action1, action2):
14610         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14611         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14612         sys.exit(1)
14613
14614 def insert_optional_args(args):
14615         """
14616         Parse optional arguments and insert a value if one has
14617         not been provided. This is done before feeding the args
14618         to the optparse parser since that parser does not support
14619         this feature natively.
14620         """
14621
14622         new_args = []
14623         jobs_opts = ("-j", "--jobs")
14624         arg_stack = args[:]
14625         arg_stack.reverse()
14626         while arg_stack:
14627                 arg = arg_stack.pop()
14628
14629                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14630                 if not (short_job_opt or arg in jobs_opts):
14631                         new_args.append(arg)
14632                         continue
14633
14634                 # Insert an empty placeholder in order to
14635                 # satisfy the requirements of optparse.
14636
14637                 new_args.append("--jobs")
14638                 job_count = None
14639                 saved_opts = None
14640                 if short_job_opt and len(arg) > 2:
14641                         if arg[:2] == "-j":
14642                                 try:
14643                                         job_count = int(arg[2:])
14644                                 except ValueError:
14645                                         saved_opts = arg[2:]
14646                         else:
14647                                 job_count = "True"
14648                                 saved_opts = arg[1:].replace("j", "")
14649
14650                 if job_count is None and arg_stack:
14651                         try:
14652                                 job_count = int(arg_stack[-1])
14653                         except ValueError:
14654                                 pass
14655                         else:
14656                                 # Discard the job count from the stack
14657                                 # since we're consuming it here.
14658                                 arg_stack.pop()
14659
14660                 if job_count is None:
14661                         # unlimited number of jobs
14662                         new_args.append("True")
14663                 else:
14664                         new_args.append(str(job_count))
14665
14666                 if saved_opts is not None:
14667                         new_args.append("-" + saved_opts)
14668
14669         return new_args
14670
14671 def parse_opts(tmpcmdline, silent=False):
14672         myaction=None
14673         myopts = {}
14674         myfiles=[]
14675
14676         global actions, options, shortmapping
14677
14678         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14679         argument_options = {
14680                 "--config-root": {
14681                         "help":"specify the location for portage configuration files",
14682                         "action":"store"
14683                 },
14684                 "--color": {
14685                         "help":"enable or disable color output",
14686                         "type":"choice",
14687                         "choices":("y", "n")
14688                 },
14689
14690                 "--jobs": {
14691
14692                         "help"   : "Specifies the number of packages to build " + \
14693                                 "simultaneously.",
14694
14695                         "action" : "store"
14696                 },
14697
14698                 "--load-average": {
14699
14700                         "help"   :"Specifies that no new builds should be started " + \
14701                                 "if there are other builds running and the load average " + \
14702                                 "is at least LOAD (a floating-point number).",
14703
14704                         "action" : "store"
14705                 },
14706
14707                 "--with-bdeps": {
14708                         "help":"include unnecessary build time dependencies",
14709                         "type":"choice",
14710                         "choices":("y", "n")
14711                 },
14712                 "--reinstall": {
14713                         "help":"specify conditions to trigger package reinstallation",
14714                         "type":"choice",
14715                         "choices":["changed-use"]
14716                 }
14717         }
14718
14719         from optparse import OptionParser
14720         parser = OptionParser()
14721         if parser.has_option("--help"):
14722                 parser.remove_option("--help")
14723
14724         for action_opt in actions:
14725                 parser.add_option("--" + action_opt, action="store_true",
14726                         dest=action_opt.replace("-", "_"), default=False)
14727         for myopt in options:
14728                 parser.add_option(myopt, action="store_true",
14729                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14730         for shortopt, longopt in shortmapping.iteritems():
14731                 parser.add_option("-" + shortopt, action="store_true",
14732                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14733         for myalias, myopt in longopt_aliases.iteritems():
14734                 parser.add_option(myalias, action="store_true",
14735                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14736
14737         for myopt, kwargs in argument_options.iteritems():
14738                 parser.add_option(myopt,
14739                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14740
14741         tmpcmdline = insert_optional_args(tmpcmdline)
14742
14743         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14744
14745         if myoptions.jobs:
14746                 jobs = None
14747                 if myoptions.jobs == "True":
14748                         jobs = True
14749                 else:
14750                         try:
14751                                 jobs = int(myoptions.jobs)
14752                         except ValueError:
14753                                 jobs = -1
14754
14755                 if jobs is not True and \
14756                         jobs < 1:
14757                         jobs = None
14758                         if not silent:
14759                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14760                                         (myoptions.jobs,), noiselevel=-1)
14761
14762                 myoptions.jobs = jobs
14763
14764         if myoptions.load_average:
14765                 try:
14766                         load_average = float(myoptions.load_average)
14767                 except ValueError:
14768                         load_average = 0.0
14769
14770                 if load_average <= 0.0:
14771                         load_average = None
14772                         if not silent:
14773                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14774                                         (myoptions.load_average,), noiselevel=-1)
14775
14776                 myoptions.load_average = load_average
14777
14778         for myopt in options:
14779                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14780                 if v:
14781                         myopts[myopt] = True
14782
14783         for myopt in argument_options:
14784                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14785                 if v is not None:
14786                         myopts[myopt] = v
14787
14788         for action_opt in actions:
14789                 v = getattr(myoptions, action_opt.replace("-", "_"))
14790                 if v:
14791                         if myaction:
14792                                 multiple_actions(myaction, action_opt)
14793                                 sys.exit(1)
14794                         myaction = action_opt
14795
14796         myfiles += myargs
14797
14798         return myaction, myopts, myfiles
14799
14800 def validate_ebuild_environment(trees):
14801         for myroot in trees:
14802                 settings = trees[myroot]["vartree"].settings
14803                 settings.validate()
14804
14805 def clear_caches(trees):
14806         for d in trees.itervalues():
14807                 d["porttree"].dbapi.melt()
14808                 d["porttree"].dbapi._aux_cache.clear()
14809                 d["bintree"].dbapi._aux_cache.clear()
14810                 d["bintree"].dbapi._clear_cache()
14811                 d["vartree"].dbapi.linkmap._clear_cache()
14812         portage.dircache.clear()
14813         gc.collect()
14814
14815 def load_emerge_config(trees=None):
14816         kwargs = {}
14817         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14818                 v = os.environ.get(envvar, None)
14819                 if v and v.strip():
14820                         kwargs[k] = v
14821         trees = portage.create_trees(trees=trees, **kwargs)
14822
14823         for root, root_trees in trees.iteritems():
14824                 settings = root_trees["vartree"].settings
14825                 setconfig = load_default_config(settings, root_trees)
14826                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14827
14828         settings = trees["/"]["vartree"].settings
14829
14830         for myroot in trees:
14831                 if myroot != "/":
14832                         settings = trees[myroot]["vartree"].settings
14833                         break
14834
14835         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14836         mtimedb = portage.MtimeDB(mtimedbfile)
14837         
14838         return settings, trees, mtimedb
14839
14840 def adjust_config(myopts, settings):
14841         """Make emerge specific adjustments to the config."""
14842
14843         # To enhance usability, make some vars case insensitive by forcing them to
14844         # lower case.
14845         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14846                 if myvar in settings:
14847                         settings[myvar] = settings[myvar].lower()
14848                         settings.backup_changes(myvar)
14849         del myvar
14850
14851         # Kill noauto as it will break merges otherwise.
14852         if "noauto" in settings.features:
14853                 while "noauto" in settings.features:
14854                         settings.features.remove("noauto")
14855                 settings["FEATURES"] = " ".join(settings.features)
14856                 settings.backup_changes("FEATURES")
14857
14858         CLEAN_DELAY = 5
14859         try:
14860                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14861         except ValueError, e:
14862                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14863                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14864                         settings["CLEAN_DELAY"], noiselevel=-1)
14865         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14866         settings.backup_changes("CLEAN_DELAY")
14867
14868         EMERGE_WARNING_DELAY = 10
14869         try:
14870                 EMERGE_WARNING_DELAY = int(settings.get(
14871                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14872         except ValueError, e:
14873                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14874                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14875                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14876         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14877         settings.backup_changes("EMERGE_WARNING_DELAY")
14878
14879         if "--quiet" in myopts:
14880                 settings["PORTAGE_QUIET"]="1"
14881                 settings.backup_changes("PORTAGE_QUIET")
14882
14883         if "--verbose" in myopts:
14884                 settings["PORTAGE_VERBOSE"] = "1"
14885                 settings.backup_changes("PORTAGE_VERBOSE")
14886
14887         # Set so that configs will be merged regardless of remembered status
14888         if ("--noconfmem" in myopts):
14889                 settings["NOCONFMEM"]="1"
14890                 settings.backup_changes("NOCONFMEM")
14891
14892         # Set various debug markers... They should be merged somehow.
14893         PORTAGE_DEBUG = 0
14894         try:
14895                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14896                 if PORTAGE_DEBUG not in (0, 1):
14897                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14898                                 PORTAGE_DEBUG, noiselevel=-1)
14899                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14900                                 noiselevel=-1)
14901                         PORTAGE_DEBUG = 0
14902         except ValueError, e:
14903                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14904                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14905                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14906                 del e
14907         if "--debug" in myopts:
14908                 PORTAGE_DEBUG = 1
14909         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14910         settings.backup_changes("PORTAGE_DEBUG")
14911
14912         if settings.get("NOCOLOR") not in ("yes","true"):
14913                 portage.output.havecolor = 1
14914
14915         """The explicit --color < y | n > option overrides the NOCOLOR environment
14916         variable and stdout auto-detection."""
14917         if "--color" in myopts:
14918                 if "y" == myopts["--color"]:
14919                         portage.output.havecolor = 1
14920                         settings["NOCOLOR"] = "false"
14921                 else:
14922                         portage.output.havecolor = 0
14923                         settings["NOCOLOR"] = "true"
14924                 settings.backup_changes("NOCOLOR")
14925         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14926                 portage.output.havecolor = 0
14927                 settings["NOCOLOR"] = "true"
14928                 settings.backup_changes("NOCOLOR")
14929
14930 def apply_priorities(settings):
14931         ionice(settings)
14932         nice(settings)
14933
14934 def nice(settings):
14935         try:
14936                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14937         except (OSError, ValueError), e:
14938                 out = portage.output.EOutput()
14939                 out.eerror("Failed to change nice value to '%s'" % \
14940                         settings["PORTAGE_NICENESS"])
14941                 out.eerror("%s\n" % str(e))
14942
14943 def ionice(settings):
14944
14945         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14946         if ionice_cmd:
14947                 ionice_cmd = shlex.split(ionice_cmd)
14948         if not ionice_cmd:
14949                 return
14950
14951         from portage.util import varexpand
14952         variables = {"PID" : str(os.getpid())}
14953         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14954
14955         try:
14956                 rval = portage.process.spawn(cmd, env=os.environ)
14957         except portage.exception.CommandNotFound:
14958                 # The OS kernel probably doesn't support ionice,
14959                 # so return silently.
14960                 return
14961
14962         if rval != os.EX_OK:
14963                 out = portage.output.EOutput()
14964                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14965                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14966
14967 def display_missing_pkg_set(root_config, set_name):
14968
14969         msg = []
14970         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14971                 "The following sets exist:") % \
14972                 colorize("INFORM", set_name))
14973         msg.append("")
14974
14975         for s in sorted(root_config.sets):
14976                 msg.append("    %s" % s)
14977         msg.append("")
14978
14979         writemsg_level("".join("%s\n" % l for l in msg),
14980                 level=logging.ERROR, noiselevel=-1)
14981
14982 def expand_set_arguments(myfiles, myaction, root_config):
14983         retval = os.EX_OK
14984         setconfig = root_config.setconfig
14985
14986         sets = setconfig.getSets()
14987
14988         # In order to know exactly which atoms/sets should be added to the
14989         # world file, the depgraph performs set expansion later. It will get
14990         # confused about where the atoms came from if it's not allowed to
14991         # expand them itself.
14992         do_not_expand = (None, )
14993         newargs = []
14994         for a in myfiles:
14995                 if a in ("system", "world"):
14996                         newargs.append(SETPREFIX+a)
14997                 else:
14998                         newargs.append(a)
14999         myfiles = newargs
15000         del newargs
15001         newargs = []
15002
15003         # separators for set arguments
15004         ARG_START = "{"
15005         ARG_END = "}"
15006
15007         # WARNING: all operators must be of equal length
15008         IS_OPERATOR = "/@"
15009         DIFF_OPERATOR = "-@"
15010         UNION_OPERATOR = "+@"
15011         
15012         for i in range(0, len(myfiles)):
15013                 if myfiles[i].startswith(SETPREFIX):
15014                         start = 0
15015                         end = 0
15016                         x = myfiles[i][len(SETPREFIX):]
15017                         newset = ""
15018                         while x:
15019                                 start = x.find(ARG_START)
15020                                 end = x.find(ARG_END)
15021                                 if start > 0 and start < end:
15022                                         namepart = x[:start]
15023                                         argpart = x[start+1:end]
15024                                 
15025                                         # TODO: implement proper quoting
15026                                         args = argpart.split(",")
15027                                         options = {}
15028                                         for a in args:
15029                                                 if "=" in a:
15030                                                         k, v  = a.split("=", 1)
15031                                                         options[k] = v
15032                                                 else:
15033                                                         options[a] = "True"
15034                                         setconfig.update(namepart, options)
15035                                         newset += (x[:start-len(namepart)]+namepart)
15036                                         x = x[end+len(ARG_END):]
15037                                 else:
15038                                         newset += x
15039                                         x = ""
15040                         myfiles[i] = SETPREFIX+newset
15041                                 
15042         sets = setconfig.getSets()
15043
15044         # display errors that occured while loading the SetConfig instance
15045         for e in setconfig.errors:
15046                 print colorize("BAD", "Error during set creation: %s" % e)
15047         
15048         # emerge relies on the existance of sets with names "world" and "system"
15049         required_sets = ("world", "system")
15050         missing_sets = []
15051
15052         for s in required_sets:
15053                 if s not in sets:
15054                         missing_sets.append(s)
15055         if missing_sets:
15056                 if len(missing_sets) > 2:
15057                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15058                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15059                 elif len(missing_sets) == 2:
15060                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15061                 else:
15062                         missing_sets_str = '"%s"' % missing_sets[-1]
15063                 msg = ["emerge: incomplete set configuration, " + \
15064                         "missing set(s): %s" % missing_sets_str]
15065                 if sets:
15066                         msg.append("        sets defined: %s" % ", ".join(sets))
15067                 msg.append("        This usually means that '%s'" % \
15068                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15069                 msg.append("        is missing or corrupt.")
15070                 for line in msg:
15071                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15072                 return (None, 1)
15073         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15074
15075         for a in myfiles:
15076                 if a.startswith(SETPREFIX):
15077                         # support simple set operations (intersection, difference and union)
15078                         # on the commandline. Expressions are evaluated strictly left-to-right
15079                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15080                                 expression = a[len(SETPREFIX):]
15081                                 expr_sets = []
15082                                 expr_ops = []
15083                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15084                                         is_pos = expression.rfind(IS_OPERATOR)
15085                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15086                                         union_pos = expression.rfind(UNION_OPERATOR)
15087                                         op_pos = max(is_pos, diff_pos, union_pos)
15088                                         s1 = expression[:op_pos]
15089                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15090                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15091                                         if not s2 in sets:
15092                                                 display_missing_pkg_set(root_config, s2)
15093                                                 return (None, 1)
15094                                         expr_sets.insert(0, s2)
15095                                         expr_ops.insert(0, op)
15096                                         expression = s1
15097                                 if not expression in sets:
15098                                         display_missing_pkg_set(root_config, expression)
15099                                         return (None, 1)
15100                                 expr_sets.insert(0, expression)
15101                                 result = set(setconfig.getSetAtoms(expression))
15102                                 for i in range(0, len(expr_ops)):
15103                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15104                                         if expr_ops[i] == IS_OPERATOR:
15105                                                 result.intersection_update(s2)
15106                                         elif expr_ops[i] == DIFF_OPERATOR:
15107                                                 result.difference_update(s2)
15108                                         elif expr_ops[i] == UNION_OPERATOR:
15109                                                 result.update(s2)
15110                                         else:
15111                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15112                                 newargs.extend(result)
15113                         else:                   
15114                                 s = a[len(SETPREFIX):]
15115                                 if s not in sets:
15116                                         display_missing_pkg_set(root_config, s)
15117                                         return (None, 1)
15118                                 setconfig.active.append(s)
15119                                 try:
15120                                         set_atoms = setconfig.getSetAtoms(s)
15121                                 except portage.exception.PackageSetNotFound, e:
15122                                         writemsg_level(("emerge: the given set '%s' " + \
15123                                                 "contains a non-existent set named '%s'.\n") % \
15124                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15125                                         return (None, 1)
15126                                 if myaction in unmerge_actions and \
15127                                                 not sets[s].supportsOperation("unmerge"):
15128                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15129                                                 "not support unmerge operations\n")
15130                                         retval = 1
15131                                 elif not set_atoms:
15132                                         print "emerge: '%s' is an empty set" % s
15133                                 elif myaction not in do_not_expand:
15134                                         newargs.extend(set_atoms)
15135                                 else:
15136                                         newargs.append(SETPREFIX+s)
15137                                 for e in sets[s].errors:
15138                                         print e
15139                 else:
15140                         newargs.append(a)
15141         return (newargs, retval)
15142
15143 def repo_name_check(trees):
15144         missing_repo_names = set()
15145         for root, root_trees in trees.iteritems():
15146                 if "porttree" in root_trees:
15147                         portdb = root_trees["porttree"].dbapi
15148                         missing_repo_names.update(portdb.porttrees)
15149                         repos = portdb.getRepositories()
15150                         for r in repos:
15151                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15152                         if portdb.porttree_root in missing_repo_names and \
15153                                 not os.path.exists(os.path.join(
15154                                 portdb.porttree_root, "profiles")):
15155                                 # This is normal if $PORTDIR happens to be empty,
15156                                 # so don't warn about it.
15157                                 missing_repo_names.remove(portdb.porttree_root)
15158
15159         if missing_repo_names:
15160                 msg = []
15161                 msg.append("WARNING: One or more repositories " + \
15162                         "have missing repo_name entries:")
15163                 msg.append("")
15164                 for p in missing_repo_names:
15165                         msg.append("\t%s/profiles/repo_name" % (p,))
15166                 msg.append("")
15167                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15168                         "should be a plain text file containing a unique " + \
15169                         "name for the repository on the first line.", 70))
15170                 writemsg_level("".join("%s\n" % l for l in msg),
15171                         level=logging.WARNING, noiselevel=-1)
15172
15173         return bool(missing_repo_names)
15174
15175 def config_protect_check(trees):
15176         for root, root_trees in trees.iteritems():
15177                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15178                         msg = "!!! CONFIG_PROTECT is empty"
15179                         if root != "/":
15180                                 msg += " for '%s'" % root
15181                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15182
15183 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15184
15185         if "--quiet" in myopts:
15186                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15187                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15188                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15189                         print "    " + colorize("INFORM", cp)
15190                 return
15191
15192         s = search(root_config, spinner, "--searchdesc" in myopts,
15193                 "--quiet" not in myopts, "--usepkg" in myopts,
15194                 "--usepkgonly" in myopts)
15195         null_cp = portage.dep_getkey(insert_category_into_atom(
15196                 arg, "null"))
15197         cat, atom_pn = portage.catsplit(null_cp)
15198         s.searchkey = atom_pn
15199         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15200                 s.addCP(cp)
15201         s.output()
15202         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15203         print "!!! one of the above fully-qualified ebuild names instead.\n"
15204
15205 def profile_check(trees, myaction, myopts):
15206         if myaction in ("info", "sync"):
15207                 return os.EX_OK
15208         elif "--version" in myopts or "--help" in myopts:
15209                 return os.EX_OK
15210         for root, root_trees in trees.iteritems():
15211                 if root_trees["root_config"].settings.profiles:
15212                         continue
15213                 # generate some profile related warning messages
15214                 validate_ebuild_environment(trees)
15215                 msg = "If you have just changed your profile configuration, you " + \
15216                         "should revert back to the previous configuration. Due to " + \
15217                         "your current profile being invalid, allowed actions are " + \
15218                         "limited to --help, --info, --sync, and --version."
15219                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15220                         level=logging.ERROR, noiselevel=-1)
15221                 return 1
15222         return os.EX_OK
15223
15224 def emerge_main():
15225         global portage  # NFC why this is necessary now - genone
15226         portage._disable_legacy_globals()
15227         # Disable color until we're sure that it should be enabled (after
15228         # EMERGE_DEFAULT_OPTS has been parsed).
15229         portage.output.havecolor = 0
15230         # This first pass is just for options that need to be known as early as
15231         # possible, such as --config-root.  They will be parsed again later,
15232         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15233         # the value of --config-root).
15234         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15235         if "--debug" in myopts:
15236                 os.environ["PORTAGE_DEBUG"] = "1"
15237         if "--config-root" in myopts:
15238                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15239
15240         # Portage needs to ensure a sane umask for the files it creates.
15241         os.umask(022)
15242         settings, trees, mtimedb = load_emerge_config()
15243         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15244         rval = profile_check(trees, myaction, myopts)
15245         if rval != os.EX_OK:
15246                 return rval
15247
15248         if portage._global_updates(trees, mtimedb["updates"]):
15249                 mtimedb.commit()
15250                 # Reload the whole config from scratch.
15251                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15252                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15253
15254         xterm_titles = "notitles" not in settings.features
15255
15256         tmpcmdline = []
15257         if "--ignore-default-opts" not in myopts:
15258                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15259         tmpcmdline.extend(sys.argv[1:])
15260         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15261
15262         if "--digest" in myopts:
15263                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15264                 # Reload the whole config from scratch so that the portdbapi internal
15265                 # config is updated with new FEATURES.
15266                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15267                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15268
15269         for myroot in trees:
15270                 mysettings =  trees[myroot]["vartree"].settings
15271                 mysettings.unlock()
15272                 adjust_config(myopts, mysettings)
15273                 if "--pretend" not in myopts:
15274                         mysettings["PORTAGE_COUNTER_HASH"] = \
15275                                 trees[myroot]["vartree"].dbapi._counter_hash()
15276                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15277                 mysettings.lock()
15278                 del myroot, mysettings
15279
15280         apply_priorities(settings)
15281
15282         spinner = stdout_spinner()
15283         if "candy" in settings.features:
15284                 spinner.update = spinner.update_scroll
15285
15286         if "--quiet" not in myopts:
15287                 portage.deprecated_profile_check(settings=settings)
15288                 repo_name_check(trees)
15289                 config_protect_check(trees)
15290
15291         eclasses_overridden = {}
15292         for mytrees in trees.itervalues():
15293                 mydb = mytrees["porttree"].dbapi
15294                 # Freeze the portdbapi for performance (memoize all xmatch results).
15295                 mydb.freeze()
15296                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15297         del mytrees, mydb
15298
15299         if eclasses_overridden and \
15300                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15301                 prefix = bad(" * ")
15302                 if len(eclasses_overridden) == 1:
15303                         writemsg(prefix + "Overlay eclass overrides " + \
15304                                 "eclass from PORTDIR:\n", noiselevel=-1)
15305                 else:
15306                         writemsg(prefix + "Overlay eclasses override " + \
15307                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15308                 writemsg(prefix + "\n", noiselevel=-1)
15309                 for eclass_name in sorted(eclasses_overridden):
15310                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15311                                 (eclasses_overridden[eclass_name], eclass_name),
15312                                 noiselevel=-1)
15313                 writemsg(prefix + "\n", noiselevel=-1)
15314                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15315                 "because it will trigger invalidation of cached ebuild metadata " + \
15316                 "that is distributed with the portage tree. If you must " + \
15317                 "override eclasses from PORTDIR then you are advised to add " + \
15318                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15319                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15320                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15321                 "you would like to disable this warning."
15322                 from textwrap import wrap
15323                 for line in wrap(msg, 72):
15324                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15325
15326         if "moo" in myfiles:
15327                 print """
15328
15329   Larry loves Gentoo (""" + platform.system() + """)
15330
15331  _______________________
15332 < Have you mooed today? >
15333  -----------------------
15334         \   ^__^
15335          \  (oo)\_______
15336             (__)\       )\/\ 
15337                 ||----w |
15338                 ||     ||
15339
15340 """
15341
15342         for x in myfiles:
15343                 ext = os.path.splitext(x)[1]
15344                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15345                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15346                         break
15347
15348         root_config = trees[settings["ROOT"]]["root_config"]
15349         if myaction == "list-sets":
15350                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15351                 sys.stdout.flush()
15352                 return os.EX_OK
15353
15354         # only expand sets for actions taking package arguments
15355         oldargs = myfiles[:]
15356         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15357                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15358                 if retval != os.EX_OK:
15359                         return retval
15360
15361                 # Need to handle empty sets specially, otherwise emerge will react 
15362                 # with the help message for empty argument lists
15363                 if oldargs and not myfiles:
15364                         print "emerge: no targets left after set expansion"
15365                         return 0
15366
15367         if ("--tree" in myopts) and ("--columns" in myopts):
15368                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15369                 return 1
15370
15371         if ("--quiet" in myopts):
15372                 spinner.update = spinner.update_quiet
15373                 portage.util.noiselimit = -1
15374
15375         # Always create packages if FEATURES=buildpkg
15376         # Imply --buildpkg if --buildpkgonly
15377         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15378                 if "--buildpkg" not in myopts:
15379                         myopts["--buildpkg"] = True
15380
15381         # Also allow -S to invoke search action (-sS)
15382         if ("--searchdesc" in myopts):
15383                 if myaction and myaction != "search":
15384                         myfiles.append(myaction)
15385                 if "--search" not in myopts:
15386                         myopts["--search"] = True
15387                 myaction = "search"
15388
15389         # Always try and fetch binary packages if FEATURES=getbinpkg
15390         if ("getbinpkg" in settings.features):
15391                 myopts["--getbinpkg"] = True
15392
15393         if "--buildpkgonly" in myopts:
15394                 # --buildpkgonly will not merge anything, so
15395                 # it cancels all binary package options.
15396                 for opt in ("--getbinpkg", "--getbinpkgonly",
15397                         "--usepkg", "--usepkgonly"):
15398                         myopts.pop(opt, None)
15399
15400         if "--fetch-all-uri" in myopts:
15401                 myopts["--fetchonly"] = True
15402
15403         if "--skipfirst" in myopts and "--resume" not in myopts:
15404                 myopts["--resume"] = True
15405
15406         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15407                 myopts["--usepkgonly"] = True
15408
15409         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15410                 myopts["--getbinpkg"] = True
15411
15412         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15413                 myopts["--usepkg"] = True
15414
15415         # Also allow -K to apply --usepkg/-k
15416         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15417                 myopts["--usepkg"] = True
15418
15419         # Allow -p to remove --ask
15420         if ("--pretend" in myopts) and ("--ask" in myopts):
15421                 print ">>> --pretend disables --ask... removing --ask from options."
15422                 del myopts["--ask"]
15423
15424         # forbid --ask when not in a terminal
15425         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15426         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15427                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15428                         noiselevel=-1)
15429                 return 1
15430
15431         if settings.get("PORTAGE_DEBUG", "") == "1":
15432                 spinner.update = spinner.update_quiet
15433                 portage.debug=1
15434                 if "python-trace" in settings.features:
15435                         import portage.debug
15436                         portage.debug.set_trace(True)
15437
15438         if not ("--quiet" in myopts):
15439                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15440                         spinner.update = spinner.update_basic
15441
15442         if myaction == 'version':
15443                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15444                         settings.profile_path, settings["CHOST"],
15445                         trees[settings["ROOT"]]["vartree"].dbapi)
15446                 return 0
15447         elif "--help" in myopts:
15448                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15449                 return 0
15450
15451         if "--debug" in myopts:
15452                 print "myaction", myaction
15453                 print "myopts", myopts
15454
15455         if not myaction and not myfiles and "--resume" not in myopts:
15456                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15457                 return 1
15458
15459         pretend = "--pretend" in myopts
15460         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15461         buildpkgonly = "--buildpkgonly" in myopts
15462
15463         # check if root user is the current user for the actions where emerge needs this
15464         if portage.secpass < 2:
15465                 # We've already allowed "--version" and "--help" above.
15466                 if "--pretend" not in myopts and myaction not in ("search","info"):
15467                         need_superuser = not \
15468                                 (fetchonly or \
15469                                 (buildpkgonly and secpass >= 1) or \
15470                                 myaction in ("metadata", "regen") or \
15471                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15472                         if portage.secpass < 1 or \
15473                                 need_superuser:
15474                                 if need_superuser:
15475                                         access_desc = "superuser"
15476                                 else:
15477                                         access_desc = "portage group"
15478                                 # Always show portage_group_warning() when only portage group
15479                                 # access is required but the user is not in the portage group.
15480                                 from portage.data import portage_group_warning
15481                                 if "--ask" in myopts:
15482                                         myopts["--pretend"] = True
15483                                         del myopts["--ask"]
15484                                         print ("%s access is required... " + \
15485                                                 "adding --pretend to options.\n") % access_desc
15486                                         if portage.secpass < 1 and not need_superuser:
15487                                                 portage_group_warning()
15488                                 else:
15489                                         sys.stderr.write(("emerge: %s access is " + \
15490                                                 "required.\n\n") % access_desc)
15491                                         if portage.secpass < 1 and not need_superuser:
15492                                                 portage_group_warning()
15493                                         return 1
15494
15495         disable_emergelog = False
15496         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15497                 if x in myopts:
15498                         disable_emergelog = True
15499                         break
15500         if myaction in ("search", "info"):
15501                 disable_emergelog = True
15502         if disable_emergelog:
15503                 """ Disable emergelog for everything except build or unmerge
15504                 operations.  This helps minimize parallel emerge.log entries that can
15505                 confuse log parsers.  We especially want it disabled during
15506                 parallel-fetch, which uses --resume --fetchonly."""
15507                 global emergelog
15508                 def emergelog(*pargs, **kargs):
15509                         pass
15510
15511         if not "--pretend" in myopts:
15512                 emergelog(xterm_titles, "Started emerge on: "+\
15513                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15514                 myelogstr=""
15515                 if myopts:
15516                         myelogstr=" ".join(myopts)
15517                 if myaction:
15518                         myelogstr+=" "+myaction
15519                 if myfiles:
15520                         myelogstr += " " + " ".join(oldargs)
15521                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15522         del oldargs
15523
15524         def emergeexitsig(signum, frame):
15525                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15526                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15527                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15528                 sys.exit(100+signum)
15529         signal.signal(signal.SIGINT, emergeexitsig)
15530         signal.signal(signal.SIGTERM, emergeexitsig)
15531
15532         def emergeexit():
15533                 """This gets out final log message in before we quit."""
15534                 if "--pretend" not in myopts:
15535                         emergelog(xterm_titles, " *** terminating.")
15536                 if "notitles" not in settings.features:
15537                         xtermTitleReset()
15538         portage.atexit_register(emergeexit)
15539
15540         if myaction in ("config", "metadata", "regen", "sync"):
15541                 if "--pretend" in myopts:
15542                         sys.stderr.write(("emerge: The '%s' action does " + \
15543                                 "not support '--pretend'.\n") % myaction)
15544                         return 1
15545
15546         if "sync" == myaction:
15547                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15548         elif "metadata" == myaction:
15549                 action_metadata(settings, portdb, myopts)
15550         elif myaction=="regen":
15551                 validate_ebuild_environment(trees)
15552                 return action_regen(settings, portdb, myopts.get("--jobs"),
15553                         myopts.get("--load-average"))
15554         # HELP action
15555         elif "config"==myaction:
15556                 validate_ebuild_environment(trees)
15557                 action_config(settings, trees, myopts, myfiles)
15558
15559         # SEARCH action
15560         elif "search"==myaction:
15561                 validate_ebuild_environment(trees)
15562                 action_search(trees[settings["ROOT"]]["root_config"],
15563                         myopts, myfiles, spinner)
15564         elif myaction in ("clean", "unmerge") or \
15565                 (myaction == "prune" and "--nodeps" in myopts):
15566                 validate_ebuild_environment(trees)
15567
15568                 # Ensure atoms are valid before calling unmerge().
15569                 # For backward compat, leading '=' is not required.
15570                 for x in myfiles:
15571                         if is_valid_package_atom(x) or \
15572                                 is_valid_package_atom("=" + x):
15573                                 continue
15574                         msg = []
15575                         msg.append("'%s' is not a valid package atom." % (x,))
15576                         msg.append("Please check ebuild(5) for full details.")
15577                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15578                                 level=logging.ERROR, noiselevel=-1)
15579                         return 1
15580
15581                 # When given a list of atoms, unmerge
15582                 # them in the order given.
15583                 ordered = myaction == "unmerge"
15584                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15585                         mtimedb["ldpath"], ordered=ordered):
15586                         if not (buildpkgonly or fetchonly or pretend):
15587                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15588
15589         elif myaction in ("depclean", "info", "prune"):
15590
15591                 # Ensure atoms are valid before calling unmerge().
15592                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15593                 valid_atoms = []
15594                 for x in myfiles:
15595                         if is_valid_package_atom(x):
15596                                 try:
15597                                         valid_atoms.append(
15598                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15599                                 except portage.exception.AmbiguousPackageName, e:
15600                                         msg = "The short ebuild name \"" + x + \
15601                                                 "\" is ambiguous.  Please specify " + \
15602                                                 "one of the following " + \
15603                                                 "fully-qualified ebuild names instead:"
15604                                         for line in textwrap.wrap(msg, 70):
15605                                                 writemsg_level("!!! %s\n" % (line,),
15606                                                         level=logging.ERROR, noiselevel=-1)
15607                                         for i in e[0]:
15608                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15609                                                         level=logging.ERROR, noiselevel=-1)
15610                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15611                                         return 1
15612                                 continue
15613                         msg = []
15614                         msg.append("'%s' is not a valid package atom." % (x,))
15615                         msg.append("Please check ebuild(5) for full details.")
15616                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15617                                 level=logging.ERROR, noiselevel=-1)
15618                         return 1
15619
15620                 if myaction == "info":
15621                         return action_info(settings, trees, myopts, valid_atoms)
15622
15623                 validate_ebuild_environment(trees)
15624                 action_depclean(settings, trees, mtimedb["ldpath"],
15625                         myopts, myaction, valid_atoms, spinner)
15626                 if not (buildpkgonly or fetchonly or pretend):
15627                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15628         # "update", "system", or just process files:
15629         else:
15630                 validate_ebuild_environment(trees)
15631                 if "--pretend" not in myopts:
15632                         display_news_notification(root_config, myopts)
15633                 retval = action_build(settings, trees, mtimedb,
15634                         myopts, myaction, myfiles, spinner)
15635                 root_config = trees[settings["ROOT"]]["root_config"]
15636                 post_emerge(root_config, myopts, mtimedb, retval)
15637
15638                 return retval