1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing output to the tty too frequently wastes cpu time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
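
# Illustrative usage sketch (annotation, not part of the original module): a
# stdout_spinner defaults to the twirl style; callers invoke update() inside
# their work loop and can rebind update to one of the other update_* methods
# (e.g. update_quiet) to change or silence the output.
#
#     spinner = stdout_spinner()
#     for _ in xrange(1000):
#         spinner.update()                        # throttled by _return_early()
#     # spinner.update = spinner.update_quiet     # no spinner output at all
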
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for user input,
147         which is checked against the responses; the first match is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
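# Illustrative sketch (annotation, not original code): called with no optional
# arguments, userquery() behaves as documented above and returns "Yes" or "No".
#
#     if userquery("Continue with the merge?") == "No":
#         sys.exit(1)
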
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",      "--version"
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
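# Worked examples (annotation): sizes are rounded up to the next whole kB and
# thousands get comma separators, so format_size(1) returns "1 kB" and
# format_size(2048000) returns "2,000 kB".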
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
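# Annotation: getgccversion() above tries, in order, `gcc-config -c`, then
# `${CHOST}-gcc -dumpversion`, then a bare `gcc -dumpversion`, and only falls
# back to "[unavailable]" after printing the error message.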
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of whether it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
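# Worked example (annotation): when myopts contains "--update" and "--deep",
# create_depgraph_params() returns set(["recurse", "selective", "deep"]); with
# only "--nodeps" it returns an empty set, since "recurse" is discarded.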
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virtual matches are unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtual expansion
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
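        # Annotation (not original code): _xmatch() above stands in for
        # portdbapi.xmatch() across all of self._dbs and supports the three
        # levels used by this class; the vim atom below is only an example.
        #
        #     best = self._xmatch("bestmatch-visible", "app-editors/vim")
        #     every = self._xmatch("match-all", "app-editors/vim")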
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
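# Illustrative usage sketch (annotation, not original code): the search class
# above is driven in three steps; root_config and spinner are assumed to come
# from the caller's existing emerge setup.
#
#     s = search(root_config, spinner, searchdesc=True, verbose=False,
#             usepkg=False, usepkgonly=False)
#     s.execute("vim")
#     s.output()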
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
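# Annotation (sketch of the behaviour documented above): when the package is
# slotted and the argument atom is precise enough, create_world_atom() returns
# a slot atom such as "sys-devel/gcc:4.1" (example only); otherwise it falls
# back to the plain ${CATEGORY}/${PN}, and it returns None when the world or
# system set already covers the package.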
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
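# Illustrative sketch (annotation, not original code): SlotObject subclasses
# declare __slots__ and receive their attribute values as keyword arguments;
# anything not passed defaults to None.
#
#     class _Example(SlotObject):          # hypothetical subclass
#         __slots__ = ("name", "value")
#
#     obj = _Example(name="foo")           # obj.value is None
#     clone = obj.copy()                   # copies name and value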
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
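# Worked example (annotation): per the table above,
# UnmergeDepPriority(runtime=True) maps to 0 and prints as "hard", while
# UnmergeDepPriority(buildtime=True) maps to -2 and prints as "soft".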
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
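# Annotation: both *Range classes above expose an ignore_priority tuple ordered
# from least to most aggressive filtering -- index 0 is None (ignore nothing),
# and the final entry ignores everything except buildtime dependencies (in
# DepPrioritySatisfiedRange, only unsatisfied ones).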
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
1158 class FakeVartree(portage.vartree):
1159         """This implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do its
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Initialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
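                # For example (a sketch), a caller that has just merged or
                # unmerged packages could simply call:
                #
                #   fake_vartree.sync()
                #
                # _sync() below re-reads COUNTER/_mtime_ from the real vardb
                # and replaces any Package instances that have gone stale.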
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
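
# The files under profiles/updates contain one command per line, for
# example (illustrative):
#
#   move net-im/gaim net-im/pidgin
#   slotmove =media-libs/lesstif-0.93* 1 0
#
# parse_updates() turns these into command lists that
# perform_global_updates() below applies to DEPEND/RDEPEND/PDEPEND.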
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. This can raise an InvalidDependString
1383         exception if LICENSE is invalid.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if not pkg.installed:
1391                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
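
# Illustrative relationship between visible() and get_masking_status()
# below (a sketch; the variable names are assumed from the caller):
#
#   if not visible(pkgsettings, pkg):
#           mreasons = get_masking_status(pkg, pkgsettings, root_config)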
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if not pkg.installed:
1419                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440         if metadata is None:
1441                 mreasons = ["corruption"]
1442         else:
1443                 pkg = Package(type_name=pkg_type, root_config=root_config,
1444                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1445                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446         return metadata, mreasons
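
# Example call pattern (a sketch; variable names are illustrative):
#
#   metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings,
#           portdb, "ebuild", False, False, db_keys)
#   masked_packages.append(
#           (root_config, pkgsettings, cpv, metadata, mreasons))
#   have_eapi_mask = show_masked_packages(masked_packages)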
1447
1448 def show_masked_packages(masked_packages):
1449         shown_licenses = set()
1450         shown_comments = set()
1451         # A package may be masked as both an ebuild and a binary;
1452         # only show one of them to avoid redundant output.
1453         shown_cpvs = set()
1454         have_eapi_mask = False
1455         for (root_config, pkgsettings, cpv,
1456                 metadata, mreasons) in masked_packages:
1457                 if cpv in shown_cpvs:
1458                         continue
1459                 shown_cpvs.add(cpv)
1460                 comment, filename = None, None
1461                 if "package.mask" in mreasons:
1462                         comment, filename = \
1463                                 portage.getmaskingreason(
1464                                 cpv, metadata=metadata,
1465                                 settings=pkgsettings,
1466                                 portdb=root_config.trees["porttree"].dbapi,
1467                                 return_location=True)
1468                 missing_licenses = []
1469                 if metadata:
1470                         if not portage.eapi_is_supported(metadata["EAPI"]):
1471                                 have_eapi_mask = True
1472                         try:
1473                                 missing_licenses = \
1474                                         pkgsettings._getMissingLicenses(
1475                                                 cpv, metadata)
1476                         except portage.exception.InvalidDependString:
1477                                 # This will have already been reported
1478                                 # above via mreasons.
1479                                 pass
1480
1481                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482                 if comment and comment not in shown_comments:
1483                         print filename+":"
1484                         print comment
1485                         shown_comments.add(comment)
1486                 portdb = root_config.trees["porttree"].dbapi
1487                 for l in missing_licenses:
1488                         l_path = portdb.findLicensePath(l)
1489                         if l in shown_licenses:
1490                                 continue
1491                         msg = ("A copy of the '%s' license" + \
1492                                 " is located at '%s'.") % (l, l_path)
1493                         print msg
1494                         print
1495                         shown_licenses.add(l)
1496         return have_eapi_mask
1497
1498 class Task(SlotObject):
1499         __slots__ = ("_hash_key", "_hash_value")
1500
1501         def _get_hash_key(self):
1502                 hash_key = getattr(self, "_hash_key", None)
1503                 if hash_key is None:
1504                         raise NotImplementedError(self)
1505                 return hash_key
1506
1507         def __eq__(self, other):
1508                 return self._get_hash_key() == other
1509
1510         def __ne__(self, other):
1511                 return self._get_hash_key() != other
1512
1513         def __hash__(self):
1514                 hash_value = getattr(self, "_hash_value", None)
1515                 if hash_value is None:
1516                         self._hash_value = hash(self._get_hash_key())
1517                 return self._hash_value
1518
1519         def __len__(self):
1520                 return len(self._get_hash_key())
1521
1522         def __getitem__(self, key):
1523                 return self._get_hash_key()[key]
1524
1525         def __iter__(self):
1526                 return iter(self._get_hash_key())
1527
1528         def __contains__(self, key):
1529                 return key in self._get_hash_key()
1530
1531         def __str__(self):
1532                 return str(self._get_hash_key())
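        # Note that because __eq__() and __hash__() delegate to the hash key
        # tuple, a Task compares equal to that tuple itself.  For example
        # (illustrative), an installed Package hashes the same as
        # ("installed", root, cpv, "nomerge"), so FakeVartree can look up
        # cached Package instances by tuple before constructing them.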
1533
1534 class Blocker(Task):
1535
1536         __hash__ = Task.__hash__
1537         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1538
1539         def __init__(self, **kwargs):
1540                 Task.__init__(self, **kwargs)
1541                 self.cp = portage.dep_getkey(self.atom)
1542
1543         def _get_hash_key(self):
1544                 hash_key = getattr(self, "_hash_key", None)
1545                 if hash_key is None:
1546                         self._hash_key = \
1547                                 ("blocks", self.root, self.atom, self.eapi)
1548                 return self._hash_key
1549
1550 class Package(Task):
1551
1552         __hash__ = Task.__hash__
1553         __slots__ = ("built", "cpv", "depth",
1554                 "installed", "metadata", "onlydeps", "operation",
1555                 "root_config", "type_name",
1556                 "category", "counter", "cp", "cpv_split",
1557                 "inherited", "iuse", "mtime",
1558                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1559
1560         metadata_keys = [
1561                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562                 "INHERITED", "IUSE", "KEYWORDS",
1563                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565
1566         def __init__(self, **kwargs):
1567                 Task.__init__(self, **kwargs)
1568                 self.root = self.root_config.root
1569                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570                 self.cp = portage.cpv_getkey(self.cpv)
1571                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1572                 self.category, self.pf = portage.catsplit(self.cpv)
1573                 self.cpv_split = portage.catpkgsplit(self.cpv)
1574                 self.pv_split = self.cpv_split[1:]
1575
1576         class _use(object):
1577
1578                 __slots__ = ("__weakref__", "enabled")
1579
1580                 def __init__(self, use):
1581                         self.enabled = frozenset(use)
1582
1583         class _iuse(object):
1584
1585                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1586
1587                 def __init__(self, tokens, iuse_implicit):
1588                         self.tokens = tuple(tokens)
1589                         self.iuse_implicit = iuse_implicit
1590                         enabled = []
1591                         disabled = []
1592                         other = []
1593                         for x in tokens:
1594                                 prefix = x[:1]
1595                                 if prefix == "+":
1596                                         enabled.append(x[1:])
1597                                 elif prefix == "-":
1598                                         disabled.append(x[1:])
1599                                 else:
1600                                         other.append(x)
1601                         self.enabled = frozenset(enabled)
1602                         self.disabled = frozenset(disabled)
1603                         self.all = frozenset(chain(enabled, disabled, other))
1604
1605                 def __getattribute__(self, name):
1606                         if name == "regex":
1607                                 try:
1608                                         return object.__getattribute__(self, "regex")
1609                                 except AttributeError:
1610                                         all = object.__getattribute__(self, "all")
1611                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1612                                         # Escape anything except ".*" which is supposed
1613                                         # to pass through from _get_implicit_iuse()
1614                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1615                                         regex = "^(%s)$" % "|".join(regex)
1616                                         regex = regex.replace("\\.\\*", ".*")
1617                                         self.regex = re.compile(regex)
1618                         return object.__getattribute__(self, name)
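                # For example (illustrative): with IUSE="ssl -X" plus the
                # implicit flags, iuse.regex.match("ssl") and
                # iuse.regex.match("X") succeed, while an unknown flag such
                # as "bogus" does not match.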
1619
1620         def _get_hash_key(self):
1621                 hash_key = getattr(self, "_hash_key", None)
1622                 if hash_key is None:
1623                         if self.operation is None:
1624                                 self.operation = "merge"
1625                                 if self.onlydeps or self.installed:
1626                                         self.operation = "nomerge"
1627                         self._hash_key = \
1628                                 (self.type_name, self.root, self.cpv, self.operation)
1629                 return self._hash_key
1630
1631         def __lt__(self, other):
1632                 if other.cp != self.cp:
1633                         return False
1634                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1635                         return True
1636                 return False
1637
1638         def __le__(self, other):
1639                 if other.cp != self.cp:
1640                         return False
1641                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1642                         return True
1643                 return False
1644
1645         def __gt__(self, other):
1646                 if other.cp != self.cp:
1647                         return False
1648                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1649                         return True
1650                 return False
1651
1652         def __ge__(self, other):
1653                 if other.cp != self.cp:
1654                         return False
1655                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1656                         return True
1657                 return False
1658
1659 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1660         if not x.startswith("UNUSED_"))
1661 _all_metadata_keys.discard("CDEPEND")
1662 _all_metadata_keys.update(Package.metadata_keys)
1663
1664 from portage.cache.mappings import slot_dict_class
1665 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1666
1667 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1668         """
1669         Detect metadata updates and synchronize Package attributes.
1670         """
1671
1672         __slots__ = ("_pkg",)
1673         _wrapped_keys = frozenset(
1674                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1675
1676         def __init__(self, pkg, metadata):
1677                 _PackageMetadataWrapperBase.__init__(self)
1678                 self._pkg = pkg
1679                 self.update(metadata)
1680
1681         def __setitem__(self, k, v):
1682                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1683                 if k in self._wrapped_keys:
1684                         getattr(self, "_set_" + k.lower())(k, v)
1685
1686         def _set_inherited(self, k, v):
1687                 if isinstance(v, basestring):
1688                         v = frozenset(v.split())
1689                 self._pkg.inherited = v
1690
1691         def _set_iuse(self, k, v):
1692                 self._pkg.iuse = self._pkg._iuse(
1693                         v.split(), self._pkg.root_config.iuse_implicit)
1694
1695         def _set_slot(self, k, v):
1696                 self._pkg.slot = v
1697
1698         def _set_use(self, k, v):
1699                 self._pkg.use = self._pkg._use(v.split())
1700
1701         def _set_counter(self, k, v):
1702                 if isinstance(v, basestring):
1703                         try:
1704                                 v = long(v.strip())
1705                         except ValueError:
1706                                 v = 0
1707                 self._pkg.counter = v
1708
1709         def _set__mtime_(self, k, v):
1710                 if isinstance(v, basestring):
1711                         try:
1712                                 v = long(v.strip())
1713                         except ValueError:
1714                                 v = 0
1715                 self._pkg.mtime = v
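        # For example (illustrative): assigning pkg.metadata["USE"] = "ssl nls"
        # also refreshes pkg.use.enabled, and pkg.metadata["COUNTER"] = "42"
        # sets pkg.counter to 42, so Package attributes never go stale
        # relative to the metadata dict.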
1716
1717 class EbuildFetchonly(SlotObject):
1718
1719         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1720
1721         def execute(self):
1722                 settings = self.settings
1723                 pkg = self.pkg
1724                 portdb = pkg.root_config.trees["porttree"].dbapi
1725                 ebuild_path = portdb.findname(pkg.cpv)
1726                 settings.setcpv(pkg)
1727                 debug = settings.get("PORTAGE_DEBUG") == "1"
1728                 use_cache = 1 # always true
1729                 portage.doebuild_environment(ebuild_path, "fetch",
1730                         settings["ROOT"], settings, debug, use_cache, portdb)
1731                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1732
1733                 if restrict_fetch:
1734                         rval = self._execute_with_builddir()
1735                 else:
1736                         rval = portage.doebuild(ebuild_path, "fetch",
1737                                 settings["ROOT"], settings, debug=debug,
1738                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1739                                 mydbapi=portdb, tree="porttree")
1740
1741                         if rval != os.EX_OK:
1742                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1743                                 eerror(msg, phase="unpack", key=pkg.cpv)
1744
1745                 return rval
1746
1747         def _execute_with_builddir(self):
1748                 # Spawning pkg_nofetch requires a PORTAGE_BUILDDIR, both
1749                 # to ensure a sane $PWD (bug #239560) and to store elog
1750                 # messages. Use a private temp directory, in order
1751                 # to avoid locking the main one.
1752                 settings = self.settings
1753                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1754                 from tempfile import mkdtemp
1755                 try:
1756                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1757                 except OSError, e:
1758                         if e.errno != portage.exception.PermissionDenied.errno:
1759                                 raise
1760                         raise portage.exception.PermissionDenied(global_tmpdir)
1761                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1762                 settings.backup_changes("PORTAGE_TMPDIR")
1763                 try:
1764                         retval = self._execute()
1765                 finally:
1766                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1767                         settings.backup_changes("PORTAGE_TMPDIR")
1768                         shutil.rmtree(private_tmpdir)
1769                 return retval
1770
1771         def _execute(self):
1772                 settings = self.settings
1773                 pkg = self.pkg
1774                 root_config = pkg.root_config
1775                 portdb = root_config.trees["porttree"].dbapi
1776                 ebuild_path = portdb.findname(pkg.cpv)
1777                 debug = settings.get("PORTAGE_DEBUG") == "1"
1778                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1779
1780                 retval = portage.doebuild(ebuild_path, "fetch",
1781                         self.settings["ROOT"], self.settings, debug=debug,
1782                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1783                         mydbapi=portdb, tree="porttree")
1784
1785                 if retval != os.EX_OK:
1786                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1787                         eerror(msg, phase="unpack", key=pkg.cpv)
1788
1789                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1790                 return retval
1791
1792 class PollConstants(object):
1793
1794         """
1795         Provides POLL* constants that are equivalent to those from the
1796         select module, for use by PollSelectAdapter.
1797         """
1798
1799         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1800         v = 1
1801         for k in names:
1802                 locals()[k] = getattr(select, k, v)
1803                 v *= 2
1804         del k, v
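        # For example, on platforms where the select module provides POLLIN,
        # PollConstants.POLLIN == select.POLLIN; otherwise the power-of-two
        # fallback value assigned above is used.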
1805
1806 class AsynchronousTask(SlotObject):
1807         """
1808         Subclasses override _wait() and _poll() so that calls
1809         to public methods can be wrapped for implementing
1810         hooks such as exit listener notification.
1811
1812         Subclasses should call self.wait() to notify exit listeners after
1813         the task is complete and self.returncode has been set.
1814         """
1815
1816         __slots__ = ("background", "cancelled", "returncode") + \
1817                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1818
1819         def start(self):
1820                 """
1821                 Start an asynchronous task and then return as soon as possible.
1822                 """
1823                 self._start_hook()
1824                 self._start()
1825
1826         def _start(self):
1827                 raise NotImplementedError(self)
1828
1829         def isAlive(self):
1830                 return self.returncode is None
1831
1832         def poll(self):
1833                 self._wait_hook()
1834                 return self._poll()
1835
1836         def _poll(self):
1837                 return self.returncode
1838
1839         def wait(self):
1840                 if self.returncode is None:
1841                         self._wait()
1842                 self._wait_hook()
1843                 return self.returncode
1844
1845         def _wait(self):
1846                 return self.returncode
1847
1848         def cancel(self):
1849                 self.cancelled = True
1850                 self.wait()
1851
1852         def addStartListener(self, f):
1853                 """
1854                 The function will be called with one argument, a reference to self.
1855                 """
1856                 if self._start_listeners is None:
1857                         self._start_listeners = []
1858                 self._start_listeners.append(f)
1859
1860         def removeStartListener(self, f):
1861                 if self._start_listeners is None:
1862                         return
1863                 self._start_listeners.remove(f)
1864
1865         def _start_hook(self):
1866                 if self._start_listeners is not None:
1867                         start_listeners = self._start_listeners
1868                         self._start_listeners = None
1869
1870                         for f in start_listeners:
1871                                 f(self)
1872
1873         def addExitListener(self, f):
1874                 """
1875                 The function will be called with one argument, a reference to self.
1876                 """
1877                 if self._exit_listeners is None:
1878                         self._exit_listeners = []
1879                 self._exit_listeners.append(f)
1880
1881         def removeExitListener(self, f):
1882                 if self._exit_listeners is None:
1883                         if self._exit_listener_stack is not None:
1884                                 self._exit_listener_stack.remove(f)
1885                         return
1886                 self._exit_listeners.remove(f)
1887
1888         def _wait_hook(self):
1889                 """
1890                 Call this method after the task completes, just before returning
1891                 the returncode from wait() or poll(). This hook is
1892                 used to trigger exit listeners when the returncode first
1893                 becomes available.
1894                 """
1895                 if self.returncode is not None and \
1896                         self._exit_listeners is not None:
1897
1898                         # This prevents recursion, in case one of the
1899                         # exit handlers triggers this method again by
1900                         # calling wait(). Use a stack that gives
1901                         # removeExitListener() an opportunity to consume
1902                         # listeners from the stack, before they can get
1903                         # called below. This is necessary because a call
1904                         # to one exit listener may result in a call to
1905                         # removeExitListener() for another listener on
1906                         # the stack. That listener needs to be removed
1907                         # from the stack since it would be inconsistent
1908                         # to call it after it has been passed into
1909                         # removeExitListener().
1910                         self._exit_listener_stack = self._exit_listeners
1911                         self._exit_listeners = None
1912
1913                         self._exit_listener_stack.reverse()
1914                         while self._exit_listener_stack:
1915                                 self._exit_listener_stack.pop()(self)
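        # A minimal subclass sketch (hypothetical, for illustration only):
        #
        #   class NoopTask(AsynchronousTask):
        #           def _start(self):
        #                   self.returncode = os.EX_OK
        #                   self.wait()  # notifies any exit listeners
        #
        # A caller would then do task.addExitListener(callback) followed by
        # task.start().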
1916
1917 class AbstractPollTask(AsynchronousTask):
1918
1919         __slots__ = ("scheduler",) + \
1920                 ("_registered",)
1921
1922         _bufsize = 4096
1923         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1924         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1925                 _exceptional_events
1926
1927         def _unregister(self):
1928                 raise NotImplementedError(self)
1929
1930         def _unregister_if_appropriate(self, event):
1931                 if self._registered:
1932                         if event & self._exceptional_events:
1933                                 self._unregister()
1934                                 self.cancel()
1935                         elif event & PollConstants.POLLHUP:
1936                                 self._unregister()
1937                                 self.wait()
1938
1939 class PipeReader(AbstractPollTask):
1940
1941         """
1942         Reads output from one or more files and saves it in memory,
1943         for retrieval via the getvalue() method. This is driven by
1944         the scheduler's poll() loop, so it runs entirely within the
1945         current process.
1946         """
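        # Illustrative usage (a sketch; "sched" is assumed to be a scheduler
        # object providing register()/unregister()/schedule()):
        #
        #   master_fd, slave_fd = os.pipe()
        #   reader = PipeReader(scheduler=sched,
        #           input_files={"pipe": os.fdopen(master_fd, 'rb')})
        #   reader.start()
        #   ...the writer closes slave_fd when finished...
        #   reader.wait()
        #   output = reader.getvalue()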
1947
1948         __slots__ = ("input_files",) + \
1949                 ("_read_data", "_reg_ids")
1950
1951         def _start(self):
1952                 self._reg_ids = set()
1953                 self._read_data = []
1954                 for k, f in self.input_files.iteritems():
1955                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1956                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1957                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1958                                 self._registered_events, self._output_handler))
1959                 self._registered = True
1960
1961         def isAlive(self):
1962                 return self._registered
1963
1964         def cancel(self):
1965                 if self.returncode is None:
1966                         self.returncode = 1
1967                         self.cancelled = True
1968                 self.wait()
1969
1970         def _wait(self):
1971                 if self.returncode is not None:
1972                         return self.returncode
1973
1974                 if self._registered:
1975                         self.scheduler.schedule(self._reg_ids)
1976                         self._unregister()
1977
1978                 self.returncode = os.EX_OK
1979                 return self.returncode
1980
1981         def getvalue(self):
1982                 """Retrieve the entire contents"""
1983                 if sys.hexversion >= 0x3000000:
1984                         return bytes().join(self._read_data)
1985                 return "".join(self._read_data)
1986
1987         def close(self):
1988                 """Free the memory buffer."""
1989                 self._read_data = None
1990
1991         def _output_handler(self, fd, event):
1992
1993                 if event & PollConstants.POLLIN:
1994
1995                         for f in self.input_files.itervalues():
1996                                 if fd == f.fileno():
1997                                         break
1998
1999                         buf = array.array('B')
2000                         try:
2001                                 buf.fromfile(f, self._bufsize)
2002                         except EOFError:
2003                                 pass
2004
2005                         if buf:
2006                                 self._read_data.append(buf.tostring())
2007                         else:
2008                                 self._unregister()
2009                                 self.wait()
2010
2011                 self._unregister_if_appropriate(event)
2012                 return self._registered
2013
2014         def _unregister(self):
2015                 """
2016                 Unregister from the scheduler and close open files.
2017                 """
2018
2019                 self._registered = False
2020
2021                 if self._reg_ids is not None:
2022                         for reg_id in self._reg_ids:
2023                                 self.scheduler.unregister(reg_id)
2024                         self._reg_ids = None
2025
2026                 if self.input_files is not None:
2027                         for f in self.input_files.itervalues():
2028                                 f.close()
2029                         self.input_files = None
2030
2031 class CompositeTask(AsynchronousTask):
2032
2033         __slots__ = ("scheduler",) + ("_current_task",)
2034
2035         def isAlive(self):
2036                 return self._current_task is not None
2037
2038         def cancel(self):
2039                 self.cancelled = True
2040                 if self._current_task is not None:
2041                         self._current_task.cancel()
2042
2043         def _poll(self):
2044                 """
2045                 This loops, calling self._current_task.poll()
2046                 repeatedly as long as the value of self._current_task
2047                 keeps changing. It calls poll() at most once for a
2048                 given self._current_task instance. This is useful
2049                 since calling poll() on a task can trigger advance to
2050                 the next task, which could eventually lead to the
2051                 returncode being set in cases where polling only a
2052                 single task would not have the same effect.
2053                 """
2054
2055                 prev = None
2056                 while True:
2057                         task = self._current_task
2058                         if task is None or task is prev:
2059                                 # don't poll the same task more than once
2060                                 break
2061                         task.poll()
2062                         prev = task
2063
2064                 return self.returncode
2065
2066         def _wait(self):
2067
2068                 prev = None
2069                 while True:
2070                         task = self._current_task
2071                         if task is None:
2072                                 # don't wait for the same task more than once
2073                                 break
2074                         if task is prev:
2075                                 # Before the task.wait() method returned, an exit
2076                                 # listener should have set self._current_task to either
2077                                 # a different task or None. Something is wrong.
2078                                 raise AssertionError("self._current_task has not " + \
2079                                         "changed since calling wait", self, task)
2080                         task.wait()
2081                         prev = task
2082
2083                 return self.returncode
2084
2085         def _assert_current(self, task):
2086                 """
2087                 Raises an AssertionError if the given task is not the
2088                 same one as self._current_task. This can be useful
2089                 for detecting bugs.
2090                 """
2091                 if task is not self._current_task:
2092                         raise AssertionError("Unrecognized task: %s" % (task,))
2093
2094         def _default_exit(self, task):
2095                 """
2096                 Calls _assert_current() on the given task and then sets the
2097                 composite returncode attribute if task.returncode != os.EX_OK.
2098                 If the task failed, then self._current_task will be set to None.
2099                 Subclasses can use this as a generic task exit callback.
2100
2101                 @rtype: int
2102                 @returns: The task.returncode attribute.
2103                 """
2104                 self._assert_current(task)
2105                 if task.returncode != os.EX_OK:
2106                         self.returncode = task.returncode
2107                         self._current_task = None
2108                 return task.returncode
2109
2110         def _final_exit(self, task):
2111                 """
2112                 Assumes that task is the final task of this composite task.
2113                 Calls _default_exit(), sets self.returncode to the task's
2114                 returncode, and sets self._current_task to None.
2115                 """
2116                 self._default_exit(task)
2117                 self._current_task = None
2118                 self.returncode = task.returncode
2119                 return self.returncode
2120
2121         def _default_final_exit(self, task):
2122                 """
2123                 This calls _final_exit() and then wait().
2124
2125                 Subclasses can use this as a generic final task exit callback.
2126
2127                 """
2128                 self._final_exit(task)
2129                 return self.wait()
2130
2131         def _start_task(self, task, exit_handler):
2132                 """
2133                 Register exit handler for the given task, set it
2134                 as self._current_task, and call task.start().
2135
2136                 Subclasses can use this as a generic way to start
2137                 a task.
2138
2139                 """
2140                 task.addExitListener(exit_handler)
2141                 self._current_task = task
2142                 task.start()
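        # Typical subclass pattern (a sketch; the task names are hypothetical):
        #
        #   def _start(self):
        #           self._start_task(first_task, self._first_task_exit)
        #
        #   def _first_task_exit(self, first_task):
        #           if self._default_exit(first_task) != os.EX_OK:
        #                   self.wait()
        #                   return
        #           self._start_task(second_task, self._default_final_exit)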
2143
2144 class TaskSequence(CompositeTask):
2145         """
2146         A collection of tasks that executes sequentially. Each task
2147         must have an addExitListener() method that can be used as
2148         a means to trigger movement from one task to the next.
2149         """
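        # Example (a sketch; the scheduler and tasks are assumed to exist):
        #
        #   seq = TaskSequence(scheduler=sched)
        #   seq.add(fetch_task)
        #   seq.add(build_task)
        #   seq.start()  # build_task runs only if fetch_task succeeds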
2150
2151         __slots__ = ("_task_queue",)
2152
2153         def __init__(self, **kwargs):
2154                 AsynchronousTask.__init__(self, **kwargs)
2155                 self._task_queue = deque()
2156
2157         def add(self, task):
2158                 self._task_queue.append(task)
2159
2160         def _start(self):
2161                 self._start_next_task()
2162
2163         def cancel(self):
2164                 self._task_queue.clear()
2165                 CompositeTask.cancel(self)
2166
2167         def _start_next_task(self):
2168                 self._start_task(self._task_queue.popleft(),
2169                         self._task_exit_handler)
2170
2171         def _task_exit_handler(self, task):
2172                 if self._default_exit(task) != os.EX_OK:
2173                         self.wait()
2174                 elif self._task_queue:
2175                         self._start_next_task()
2176                 else:
2177                         self._final_exit(task)
2178                         self.wait()
2179
2180 class SubProcess(AbstractPollTask):
2181
2182         __slots__ = ("pid",) + \
2183                 ("_files", "_reg_id")
2184
2185         # A file descriptor is required for the scheduler to monitor changes from
2186         # inside a poll() loop. When logging is not enabled, create a pipe just to
2187         # serve this purpose alone.
2188         _dummy_pipe_fd = 9
2189
2190         def _poll(self):
2191                 if self.returncode is not None:
2192                         return self.returncode
2193                 if self.pid is None:
2194                         return self.returncode
2195                 if self._registered:
2196                         return self.returncode
2197
2198                 try:
2199                         retval = os.waitpid(self.pid, os.WNOHANG)
2200                 except OSError, e:
2201                         if e.errno != errno.ECHILD:
2202                                 raise
2203                         del e
2204                         retval = (self.pid, 1)
2205
2206                 if retval == (0, 0):
2207                         return None
2208                 self._set_returncode(retval)
2209                 return self.returncode
2210
2211         def cancel(self):
2212                 if self.isAlive():
2213                         try:
2214                                 os.kill(self.pid, signal.SIGTERM)
2215                         except OSError, e:
2216                                 if e.errno != errno.ESRCH:
2217                                         raise
2218                                 del e
2219
2220                 self.cancelled = True
2221                 if self.pid is not None:
2222                         self.wait()
2223                 return self.returncode
2224
2225         def isAlive(self):
2226                 return self.pid is not None and \
2227                         self.returncode is None
2228
2229         def _wait(self):
2230
2231                 if self.returncode is not None:
2232                         return self.returncode
2233
2234                 if self._registered:
2235                         self.scheduler.schedule(self._reg_id)
2236                         self._unregister()
2237                         if self.returncode is not None:
2238                                 return self.returncode
2239
2240                 try:
2241                         wait_retval = os.waitpid(self.pid, 0)
2242                 except OSError, e:
2243                         if e.errno != errno.ECHILD:
2244                                 raise
2245                         del e
2246                         self._set_returncode((self.pid, 1))
2247                 else:
2248                         self._set_returncode(wait_retval)
2249
2250                 return self.returncode
2251
2252         def _unregister(self):
2253                 """
2254                 Unregister from the scheduler and close open files.
2255                 """
2256
2257                 self._registered = False
2258
2259                 if self._reg_id is not None:
2260                         self.scheduler.unregister(self._reg_id)
2261                         self._reg_id = None
2262
2263                 if self._files is not None:
2264                         for f in self._files.itervalues():
2265                                 f.close()
2266                         self._files = None
2267
2268         def _set_returncode(self, wait_retval):
2269
2270                 retval = wait_retval[1]
2271
2272                 if retval != os.EX_OK:
2273                         if retval & 0xff:
2274                                 retval = (retval & 0xff) << 8
2275                         else:
2276                                 retval = retval >> 8
2277
2278                 self.returncode = retval
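                # For example: a child that exits with code 1 produces a
                # waitpid() status of 0x0100, which the logic above maps to
                # returncode 1, while a child killed by SIGKILL (status 9)
                # yields returncode 9 << 8, keeping exit codes and signal
                # deaths distinguishable.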
2279
2280 class SpawnProcess(SubProcess):
2281
2282         """
2283         Constructor keyword args are passed into portage.process.spawn().
2284         The required "args" keyword argument will be passed as the first
2285         spawn() argument.
2286         """
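        # Illustrative usage (a sketch; "sched" is an assumed scheduler):
        #
        #   proc = SpawnProcess(args=["sleep", "1"], background=False,
        #           env=os.environ.copy(), scheduler=sched)
        #   proc.start()
        #   retcode = proc.wait()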
2287
2288         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2289                 "uid", "gid", "groups", "umask", "logfile",
2290                 "path_lookup", "pre_exec")
2291
2292         __slots__ = ("args",) + \
2293                 _spawn_kwarg_names
2294
2295         _file_names = ("log", "process", "stdout")
2296         _files_dict = slot_dict_class(_file_names, prefix="")
2297
2298         def _start(self):
2299
2300                 if self.cancelled:
2301                         return
2302
2303                 if self.fd_pipes is None:
2304                         self.fd_pipes = {}
2305                 fd_pipes = self.fd_pipes
2306                 fd_pipes.setdefault(0, sys.stdin.fileno())
2307                 fd_pipes.setdefault(1, sys.stdout.fileno())
2308                 fd_pipes.setdefault(2, sys.stderr.fileno())
2309
2310                 # flush any pending output
2311                 for fd in fd_pipes.itervalues():
2312                         if fd == sys.stdout.fileno():
2313                                 sys.stdout.flush()
2314                         if fd == sys.stderr.fileno():
2315                                 sys.stderr.flush()
2316
2317                 logfile = self.logfile
2318                 self._files = self._files_dict()
2319                 files = self._files
2320
2321                 master_fd, slave_fd = self._pipe(fd_pipes)
2322                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2323                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2324
2325                 null_input = None
2326                 fd_pipes_orig = fd_pipes.copy()
2327                 if self.background:
2328                         # TODO: Use job control functions like tcsetpgrp() to control
2329                         # access to stdin. Until then, use /dev/null so that any
2330                         # attempts to read from stdin will immediately return EOF
2331                         # instead of blocking indefinitely.
2332                         null_input = open('/dev/null', 'rb')
2333                         fd_pipes[0] = null_input.fileno()
2334                 else:
2335                         fd_pipes[0] = fd_pipes_orig[0]
2336
2337                 files.process = os.fdopen(master_fd, 'rb')
2338                 if logfile is not None:
2339
2340                         fd_pipes[1] = slave_fd
2341                         fd_pipes[2] = slave_fd
2342
2343                         files.log = open(logfile, mode='ab')
2344                         portage.util.apply_secpass_permissions(logfile,
2345                                 uid=portage.portage_uid, gid=portage.portage_gid,
2346                                 mode=0660)
2347
2348                         if not self.background:
2349                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2350
2351                         output_handler = self._output_handler
2352
2353                 else:
2354
2355                         # Create a dummy pipe so the scheduler can monitor
2356                         # the process from inside a poll() loop.
2357                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2358                         if self.background:
2359                                 fd_pipes[1] = slave_fd
2360                                 fd_pipes[2] = slave_fd
2361                         output_handler = self._dummy_handler
2362
2363                 kwargs = {}
2364                 for k in self._spawn_kwarg_names:
2365                         v = getattr(self, k)
2366                         if v is not None:
2367                                 kwargs[k] = v
2368
2369                 kwargs["fd_pipes"] = fd_pipes
2370                 kwargs["returnpid"] = True
2371                 kwargs.pop("logfile", None)
2372
2373                 self._reg_id = self.scheduler.register(files.process.fileno(),
2374                         self._registered_events, output_handler)
2375                 self._registered = True
2376
2377                 retval = self._spawn(self.args, **kwargs)
2378
2379                 os.close(slave_fd)
2380                 if null_input is not None:
2381                         null_input.close()
2382
2383                 if isinstance(retval, int):
2384                         # spawn failed
2385                         self._unregister()
2386                         self.returncode = retval
2387                         self.wait()
2388                         return
2389
2390                 self.pid = retval[0]
2391                 portage.process.spawned_pids.remove(self.pid)
2392
2393         def _pipe(self, fd_pipes):
2394                 """
2395                 @type fd_pipes: dict
2396                 @param fd_pipes: pipes from which to copy terminal size if desired.
2397                 """
2398                 return os.pipe()
2399
2400         def _spawn(self, args, **kwargs):
2401                 return portage.process.spawn(args, **kwargs)
2402
2403         def _output_handler(self, fd, event):
2404
2405                 if event & PollConstants.POLLIN:
2406
2407                         files = self._files
2408                         buf = array.array('B')
2409                         try:
2410                                 buf.fromfile(files.process, self._bufsize)
2411                         except EOFError:
2412                                 pass
2413
2414                         if buf:
2415                                 if not self.background:
2416                                         buf.tofile(files.stdout)
2417                                         files.stdout.flush()
2418                                 buf.tofile(files.log)
2419                                 files.log.flush()
2420                         else:
2421                                 self._unregister()
2422                                 self.wait()
2423
2424                 self._unregister_if_appropriate(event)
2425                 return self._registered
2426
2427         def _dummy_handler(self, fd, event):
2428                 """
2429                 This method is mainly interested in detecting EOF, since
2430                 the only purpose of the pipe is to allow the scheduler to
2431                 monitor the process from inside a poll() loop.
2432                 """
2433
2434                 if event & PollConstants.POLLIN:
2435
2436                         buf = array.array('B')
2437                         try:
2438                                 buf.fromfile(self._files.process, self._bufsize)
2439                         except EOFError:
2440                                 pass
2441
2442                         if buf:
2443                                 pass
2444                         else:
2445                                 self._unregister()
2446                                 self.wait()
2447
2448                 self._unregister_if_appropriate(event)
2449                 return self._registered
2450
2451 class MiscFunctionsProcess(SpawnProcess):
2452         """
2453         Spawns misc-functions.sh with an existing ebuild environment.
2454         """
2455
2456         __slots__ = ("commands", "phase", "pkg", "settings")
2457
2458         def _start(self):
2459                 settings = self.settings
2460                 settings.pop("EBUILD_PHASE", None)
2461                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2462                 misc_sh_binary = os.path.join(portage_bin_path,
2463                         os.path.basename(portage.const.MISC_SH_BINARY))
2464
2465                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2466                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2467
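                     # Remove any stale exit status file so that _set_returncode()
                     # only sees the status written by this run.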
2468                 portage._doebuild_exit_status_unlink(
2469                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2470
2471                 SpawnProcess._start(self)
2472
2473         def _spawn(self, args, **kwargs):
2474                 settings = self.settings
2475                 debug = settings.get("PORTAGE_DEBUG") == "1"
2476                 return portage.spawn(" ".join(args), settings,
2477                         debug=debug, **kwargs)
2478
2479         def _set_returncode(self, wait_retval):
2480                 SpawnProcess._set_returncode(self, wait_retval)
2481                 self.returncode = portage._doebuild_exit_status_check_and_log(
2482                         self.settings, self.phase, self.returncode)
2483
2484 class EbuildFetcher(SpawnProcess):
2485
2486         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2487                 ("_build_dir",)
2488
2489         def _start(self):
2490
2491                 root_config = self.pkg.root_config
2492                 portdb = root_config.trees["porttree"].dbapi
2493                 ebuild_path = portdb.findname(self.pkg.cpv)
2494                 settings = self.config_pool.allocate()
2495                 settings.setcpv(self.pkg)
2496
2497                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2498                 # should not be touched since otherwise it could interfere with
2499                 # another instance of the same cpv concurrently being built for a
2500                 # different $ROOT (currently, builds only cooperate with prefetchers
2501                 # that are spawned for the same $ROOT).
2502                 if not self.prefetch:
2503                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2504                         self._build_dir.lock()
2505                         self._build_dir.clean()
2506                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2507                         if self.logfile is None:
2508                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2509
2510                 phase = "fetch"
2511                 if self.fetchall:
2512                         phase = "fetchall"
2513
2514                 # If any incremental variables have been overridden
2515                 # via the environment, those values need to be passed
2516                 # along here so that they are correctly considered by
2517                 # the config instance in the subprocess.
2518                 fetch_env = os.environ.copy()
2519
2520                 nocolor = settings.get("NOCOLOR")
2521                 if nocolor is not None:
2522                         fetch_env["NOCOLOR"] = nocolor
2523
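                     # Force PORTAGE_NICENESS to 0 for the child; any requested
                     # niceness has presumably already been applied to the parent
                     # emerge process.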
2524                 fetch_env["PORTAGE_NICENESS"] = "0"
2525                 if self.prefetch:
2526                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2527
2528                 ebuild_binary = os.path.join(
2529                         settings["PORTAGE_BIN_PATH"], "ebuild")
2530
2531                 fetch_args = [ebuild_binary, ebuild_path, phase]
2532                 debug = settings.get("PORTAGE_DEBUG") == "1"
2533                 if debug:
2534                         fetch_args.append("--debug")
2535
2536                 self.args = fetch_args
2537                 self.env = fetch_env
2538                 SpawnProcess._start(self)
2539
2540         def _pipe(self, fd_pipes):
2541                 """When appropriate, use a pty so that fetcher progress bars,
2542                 such as wget's, will work properly."""
2543                 if self.background or not sys.stdout.isatty():
2544                         # When the output only goes to a log file,
2545                         # there's no point in creating a pty.
2546                         return os.pipe()
2547                 stdout_pipe = fd_pipes.get(1)
2548                 got_pty, master_fd, slave_fd = \
2549                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2550                 return (master_fd, slave_fd)
2551
2552         def _set_returncode(self, wait_retval):
2553                 SpawnProcess._set_returncode(self, wait_retval)
2554                 # Collect elog messages that might have been
2555                 # created by the pkg_nofetch phase.
2556                 if self._build_dir is not None:
2557                         # Skip elog messages for prefetch, in order to avoid duplicates.
2558                         if not self.prefetch and self.returncode != os.EX_OK:
2559                                 elog_out = None
2560                                 if self.logfile is not None:
2561                                         if self.background:
2562                                                 elog_out = open(self.logfile, 'a')
2563                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2564                                 if self.logfile is not None:
2565                                         msg += ", Log file:"
2566                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2567                                 if self.logfile is not None:
2568                                         eerror(" '%s'" % (self.logfile,),
2569                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2570                                 if elog_out is not None:
2571                                         elog_out.close()
2572                         if not self.prefetch:
2573                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2574                         features = self._build_dir.settings.features
2575                         if self.returncode == os.EX_OK:
2576                                 self._build_dir.clean()
2577                         self._build_dir.unlock()
2578                         self.config_pool.deallocate(self._build_dir.settings)
2579                         self._build_dir = None
2580
2581 class EbuildBuildDir(SlotObject):
2582
2583         __slots__ = ("dir_path", "pkg", "settings",
2584                 "locked", "_catdir", "_lock_obj")
2585
2586         def __init__(self, **kwargs):
2587                 SlotObject.__init__(self, **kwargs)
2588                 self.locked = False
2589
2590         def lock(self):
2591                 """
2592                 This raises an AlreadyLocked exception if lock() is called
2593                 while a lock is already held. In order to avoid this, call
2594                 unlock() first, or check the "locked" attribute before
2595                 calling lock().
2596                 """
2597                 if self._lock_obj is not None:
2598                         raise self.AlreadyLocked((self._lock_obj,))
2599
2600                 dir_path = self.dir_path
2601                 if dir_path is None:
2602                         root_config = self.pkg.root_config
2603                         portdb = root_config.trees["porttree"].dbapi
2604                         ebuild_path = portdb.findname(self.pkg.cpv)
2605                         settings = self.settings
2606                         settings.setcpv(self.pkg)
2607                         debug = settings.get("PORTAGE_DEBUG") == "1"
2608                         use_cache = 1 # always true
2609                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2610                                 self.settings, debug, use_cache, portdb)
2611                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2612
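                     # Lock the category directory while creating the build dir so
                     # that unlock(), which may rmdir an empty category directory,
                     # cannot race against this setup.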
2613                 catdir = os.path.dirname(dir_path)
2614                 self._catdir = catdir
2615
2616                 portage.util.ensure_dirs(os.path.dirname(catdir),
2617                         gid=portage.portage_gid,
2618                         mode=070, mask=0)
2619                 catdir_lock = None
2620                 try:
2621                         catdir_lock = portage.locks.lockdir(catdir)
2622                         portage.util.ensure_dirs(catdir,
2623                                 gid=portage.portage_gid,
2624                                 mode=070, mask=0)
2625                         self._lock_obj = portage.locks.lockdir(dir_path)
2626                 finally:
2627                         self.locked = self._lock_obj is not None
2628                         if catdir_lock is not None:
2629                                 portage.locks.unlockdir(catdir_lock)
2630
2631         def clean(self):
2632                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2633                 by keepwork or keeptemp in FEATURES."""
2634                 settings = self.settings
2635                 features = settings.features
2636                 if not ("keepwork" in features or "keeptemp" in features):
2637                         try:
2638                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2639                         except EnvironmentError, e:
2640                                 if e.errno != errno.ENOENT:
2641                                         raise
2642                                 del e
2643
2644         def unlock(self):
2645                 if self._lock_obj is None:
2646                         return
2647
2648                 portage.locks.unlockdir(self._lock_obj)
2649                 self._lock_obj = None
2650                 self.locked = False
2651
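                     # Remove the category directory if it is now empty; ENOENT,
                     # ENOTEMPTY and EEXIST are expected when other builds still
                     # use it or it is already gone.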
2652                 catdir = self._catdir
2653                 catdir_lock = None
2654                 try:
2655                         catdir_lock = portage.locks.lockdir(catdir)
2656                 finally:
2657                         if catdir_lock:
2658                                 try:
2659                                         os.rmdir(catdir)
2660                                 except OSError, e:
2661                                         if e.errno not in (errno.ENOENT,
2662                                                 errno.ENOTEMPTY, errno.EEXIST):
2663                                                 raise
2664                                         del e
2665                                 portage.locks.unlockdir(catdir_lock)
2666
2667         class AlreadyLocked(portage.exception.PortageException):
2668                 pass
2669
2670 class EbuildBuild(CompositeTask):
2671
2672         __slots__ = ("args_set", "config_pool", "find_blockers",
2673                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2674                 "prefetcher", "settings", "world_atom") + \
2675                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2676
2677         def _start(self):
2678
2679                 logger = self.logger
2680                 opts = self.opts
2681                 pkg = self.pkg
2682                 settings = self.settings
2683                 world_atom = self.world_atom
2684                 root_config = pkg.root_config
2685                 tree = "porttree"
2686                 self._tree = tree
2687                 portdb = root_config.trees[tree].dbapi
2688                 settings.setcpv(pkg)
2689                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2690                 ebuild_path = portdb.findname(self.pkg.cpv)
2691                 self._ebuild_path = ebuild_path
2692
2693                 prefetcher = self.prefetcher
2694                 if prefetcher is None:
2695                         pass
2696                 elif not prefetcher.isAlive():
2697                         prefetcher.cancel()
2698                 elif prefetcher.poll() is None:
2699
2700                         waiting_msg = "Fetching files " + \
2701                                 "in the background. " + \
2702                                 "To view fetch progress, run `tail -f " + \
2703                                 "/var/log/emerge-fetch.log` in another " + \
2704                                 "terminal."
2705                         msg_prefix = colorize("GOOD", " * ")
2706                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2707                                 for line in textwrap.wrap(waiting_msg, 65))
2709                         if not self.background:
2710                                 writemsg(waiting_msg, noiselevel=-1)
2711
2712                         self._current_task = prefetcher
2713                         prefetcher.addExitListener(self._prefetch_exit)
2714                         return
2715
2716                 self._prefetch_exit(prefetcher)
2717
2718         def _prefetch_exit(self, prefetcher):
2719
2720                 opts = self.opts
2721                 pkg = self.pkg
2722                 settings = self.settings
2723
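                     # In --fetchonly mode the synchronous EbuildFetchonly helper
                     # is used; otherwise an asynchronous EbuildFetcher task is
                     # started and _fetch_exit() handles the result.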
2724                 if opts.fetchonly:
2725                         fetcher = EbuildFetchonly(
2726                                 fetch_all=opts.fetch_all_uri,
2727                                 pkg=pkg, pretend=opts.pretend,
2728                                 settings=settings)
2729                         retval = fetcher.execute()
2730                         self.returncode = retval
2731                         self.wait()
2732                         return
2733
2734                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2735                         fetchall=opts.fetch_all_uri,
2736                         fetchonly=opts.fetchonly,
2737                         background=self.background,
2738                         pkg=pkg, scheduler=self.scheduler)
2739
2740                 self._start_task(fetcher, self._fetch_exit)
2741
2742         def _fetch_exit(self, fetcher):
2743                 opts = self.opts
2744                 pkg = self.pkg
2745
2746                 fetch_failed = False
2747                 if opts.fetchonly:
2748                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2749                 else:
2750                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2751
2752                 if fetch_failed and fetcher.logfile is not None and \
2753                         os.path.exists(fetcher.logfile):
2754                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2755
2756                 if not fetch_failed and fetcher.logfile is not None:
2757                         # Fetch was successful, so remove the fetch log.
2758                         try:
2759                                 os.unlink(fetcher.logfile)
2760                         except OSError:
2761                                 pass
2762
2763                 if fetch_failed or opts.fetchonly:
2764                         self.wait()
2765                         return
2766
2767                 logger = self.logger
2768                 opts = self.opts
2769                 pkg_count = self.pkg_count
2770                 scheduler = self.scheduler
2771                 settings = self.settings
2772                 features = settings.features
2773                 ebuild_path = self._ebuild_path
2774                 system_set = pkg.root_config.sets["system"]
2775
2776                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2777                 self._build_dir.lock()
2778
2779                 # Cleaning is triggered before the setup
2780                 # phase, in portage.doebuild().
2781                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2782                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2783                 short_msg = "emerge: (%s of %s) %s Clean" % \
2784                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2785                 logger.log(msg, short_msg=short_msg)
2786
2787                 # buildsyspkg: Check if we need to _force_ binary package creation
2788                 self._issyspkg = "buildsyspkg" in features and \
2789                                 system_set.findAtomForPackage(pkg) and \
2790                                 not opts.buildpkg
2791
2792                 if opts.buildpkg or self._issyspkg:
2793
2794                         self._buildpkg = True
2795
2796                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2797                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2798                         short_msg = "emerge: (%s of %s) %s Compile" % \
2799                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2800                         logger.log(msg, short_msg=short_msg)
2801
2802                 else:
2803                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2804                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2805                         short_msg = "emerge: (%s of %s) %s Compile" % \
2806                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2807                         logger.log(msg, short_msg=short_msg)
2808
2809                 build = EbuildExecuter(background=self.background, pkg=pkg,
2810                         scheduler=scheduler, settings=settings)
2811                 self._start_task(build, self._build_exit)
2812
2813         def _unlock_builddir(self):
2814                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2815                 self._build_dir.unlock()
2816
2817         def _build_exit(self, build):
2818                 if self._default_exit(build) != os.EX_OK:
2819                         self._unlock_builddir()
2820                         self.wait()
2821                         return
2822
2823                 opts = self.opts
2824                 buildpkg = self._buildpkg
2825
2826                 if not buildpkg:
2827                         self._final_exit(build)
2828                         self.wait()
2829                         return
2830
2831                 if self._issyspkg:
2832                         msg = ">>> This is a system package, " + \
2833                                 "let's pack a rescue tarball.\n"
2834
2835                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2836                         if log_path is not None:
2837                                 log_file = open(log_path, 'a')
2838                                 try:
2839                                         log_file.write(msg)
2840                                 finally:
2841                                         log_file.close()
2842
2843                         if not self.background:
2844                                 portage.writemsg_stdout(msg, noiselevel=-1)
2845
2846                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2847                         scheduler=self.scheduler, settings=self.settings)
2848
2849                 self._start_task(packager, self._buildpkg_exit)
2850
2851         def _buildpkg_exit(self, packager):
2852                 """
2853                 Release the build dir lock when there is a failure or
2854                 when in buildpkgonly mode. Otherwise, the lock is held
2855                 until install() releases it after the package has been merged.
2856                 """
2857
2858                 if self._default_exit(packager) != os.EX_OK:
2859                         self._unlock_builddir()
2860                         self.wait()
2861                         return
2862
2863                 if self.opts.buildpkgonly:
2864                         # Need to call "clean" phase for buildpkgonly mode
2865                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2866                         phase = "clean"
2867                         clean_phase = EbuildPhase(background=self.background,
2868                                 pkg=self.pkg, phase=phase,
2869                                 scheduler=self.scheduler, settings=self.settings,
2870                                 tree=self._tree)
2871                         self._start_task(clean_phase, self._clean_exit)
2872                         return
2873
2874                 # Continue holding the builddir lock until
2875                 # after the package has been installed.
2876                 self._current_task = None
2877                 self.returncode = packager.returncode
2878                 self.wait()
2879
2880         def _clean_exit(self, clean_phase):
2881                 if self._final_exit(clean_phase) != os.EX_OK or \
2882                         self.opts.buildpkgonly:
2883                         self._unlock_builddir()
2884                 self.wait()
2885
2886         def install(self):
2887                 """
2888                 Install the package and then clean up and release locks.
2889                 Only call this after the build has completed successfully
2890                 and neither fetchonly nor buildpkgonly mode is enabled.
2891                 """
2892
2893                 find_blockers = self.find_blockers
2894                 ldpath_mtimes = self.ldpath_mtimes
2895                 logger = self.logger
2896                 pkg = self.pkg
2897                 pkg_count = self.pkg_count
2898                 settings = self.settings
2899                 world_atom = self.world_atom
2900                 ebuild_path = self._ebuild_path
2901                 tree = self._tree
2902
2903                 merge = EbuildMerge(find_blockers=self.find_blockers,
2904                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2905                         pkg_count=pkg_count, pkg_path=ebuild_path,
2906                         scheduler=self.scheduler,
2907                         settings=settings, tree=tree, world_atom=world_atom)
2908
2909                 msg = " === (%s of %s) Merging (%s::%s)" % \
2910                         (pkg_count.curval, pkg_count.maxval,
2911                         pkg.cpv, ebuild_path)
2912                 short_msg = "emerge: (%s of %s) %s Merge" % \
2913                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2914                 logger.log(msg, short_msg=short_msg)
2915
2916                 try:
2917                         rval = merge.execute()
2918                 finally:
2919                         self._unlock_builddir()
2920
2921                 return rval
2922
2923 class EbuildExecuter(CompositeTask):
2924
2925         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2926
2927         _phases = ("prepare", "configure", "compile", "test", "install")
2928
2929         _live_eclasses = frozenset([
2930                 "bzr",
2931                 "cvs",
2932                 "darcs",
2933                 "git",
2934                 "mercurial",
2935                 "subversion"
2936         ])
2937
2938         def _start(self):
2939                 self._tree = "porttree"
2940                 pkg = self.pkg
2941                 phase = "clean"
2942                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2943                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2944                 self._start_task(clean_phase, self._clean_phase_exit)
2945
2946         def _clean_phase_exit(self, clean_phase):
2947
2948                 if self._default_exit(clean_phase) != os.EX_OK:
2949                         self.wait()
2950                         return
2951
2952                 pkg = self.pkg
2953                 scheduler = self.scheduler
2954                 settings = self.settings
2955                 cleanup = 1
2956
2957                 # This initializes PORTAGE_LOG_FILE.
2958                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2959
2960                 setup_phase = EbuildPhase(background=self.background,
2961                         pkg=pkg, phase="setup", scheduler=scheduler,
2962                         settings=settings, tree=self._tree)
2963
2964                 setup_phase.addExitListener(self._setup_exit)
2965                 self._current_task = setup_phase
2966                 self.scheduler.scheduleSetup(setup_phase)
2967
2968         def _setup_exit(self, setup_phase):
2969
2970                 if self._default_exit(setup_phase) != os.EX_OK:
2971                         self.wait()
2972                         return
2973
2974                 unpack_phase = EbuildPhase(background=self.background,
2975                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2976                         settings=self.settings, tree=self._tree)
2977
2978                 if self._live_eclasses.intersection(self.pkg.inherited):
2979                         # Serialize $DISTDIR access for live ebuilds since
2980                         # otherwise they can interfere with each other.
2981
2982                         unpack_phase.addExitListener(self._unpack_exit)
2983                         self._current_task = unpack_phase
2984                         self.scheduler.scheduleUnpack(unpack_phase)
2985
2986                 else:
2987                         self._start_task(unpack_phase, self._unpack_exit)
2988
2989         def _unpack_exit(self, unpack_phase):
2990
2991                 if self._default_exit(unpack_phase) != os.EX_OK:
2992                         self.wait()
2993                         return
2994
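                     # Run the remaining src_* phases one after another via a
                     # TaskSequence.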
2995                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2996
2997                 pkg = self.pkg
2998                 phases = self._phases
2999                 eapi = pkg.metadata["EAPI"]
3000                 if eapi in ("0", "1"):
3001                         # skip src_prepare and src_configure
3002                         phases = phases[2:]
3003
3004                 for phase in phases:
3005                         ebuild_phases.add(EbuildPhase(background=self.background,
3006                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3007                                 settings=self.settings, tree=self._tree))
3008
3009                 self._start_task(ebuild_phases, self._default_final_exit)
3010
3011 class EbuildMetadataPhase(SubProcess):
3012
3013         """
3014         Asynchronous interface for the ebuild "depend" phase which is
3015         used to extract metadata from the ebuild.
3016         """
3017
3018         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3019                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3020                 ("_raw_metadata",)
3021
3022         _file_names = ("ebuild",)
3023         _files_dict = slot_dict_class(_file_names, prefix="")
3024         _metadata_fd = 9
3025
3026         def _start(self):
3027                 settings = self.settings
3028                 settings.reset()
3029                 ebuild_path = self.ebuild_path
3030                 debug = settings.get("PORTAGE_DEBUG") == "1"
3031                 master_fd = None
3032                 slave_fd = None
3033                 fd_pipes = None
3034                 if self.fd_pipes is not None:
3035                         fd_pipes = self.fd_pipes.copy()
3036                 else:
3037                         fd_pipes = {}
3038
3039                 fd_pipes.setdefault(0, sys.stdin.fileno())
3040                 fd_pipes.setdefault(1, sys.stdout.fileno())
3041                 fd_pipes.setdefault(2, sys.stderr.fileno())
3042
3043                 # flush any pending output
3044                 for fd in fd_pipes.itervalues():
3045                         if fd == sys.stdout.fileno():
3046                                 sys.stdout.flush()
3047                         if fd == sys.stderr.fileno():
3048                                 sys.stderr.flush()
3049
3050                 fd_pipes_orig = fd_pipes.copy()
3051                 self._files = self._files_dict()
3052                 files = self._files
3053
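                     # Create a non-blocking pipe and pass the write end to the
                     # ebuild as fd 9 (_metadata_fd); the "depend" phase writes
                     # its metadata there while the scheduler reads the other end.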
3054                 master_fd, slave_fd = os.pipe()
3055                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3056                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3057
3058                 fd_pipes[self._metadata_fd] = slave_fd
3059
3060                 self._raw_metadata = []
3061                 files.ebuild = os.fdopen(master_fd, 'r')
3062                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3063                         self._registered_events, self._output_handler)
3064                 self._registered = True
3065
3066                 retval = portage.doebuild(ebuild_path, "depend",
3067                         settings["ROOT"], settings, debug,
3068                         mydbapi=self.portdb, tree="porttree",
3069                         fd_pipes=fd_pipes, returnpid=True)
3070
3071                 os.close(slave_fd)
3072
3073                 if isinstance(retval, int):
3074                         # doebuild failed before spawning
3075                         self._unregister()
3076                         self.returncode = retval
3077                         self.wait()
3078                         return
3079
3080                 self.pid = retval[0]
3081                 portage.process.spawned_pids.remove(self.pid)
3082
3083         def _output_handler(self, fd, event):
3084
3085                 if event & PollConstants.POLLIN:
3086                         self._raw_metadata.append(self._files.ebuild.read())
3087                         if not self._raw_metadata[-1]:
3088                                 self._unregister()
3089                                 self.wait()
3090
3091                 self._unregister_if_appropriate(event)
3092                 return self._registered
3093
3094         def _set_returncode(self, wait_retval):
3095                 SubProcess._set_returncode(self, wait_retval)
3096                 if self.returncode == os.EX_OK:
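                             # The depend phase emits one value per line, in the
                             # same order as portage.auxdbkeys.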
3097                         metadata_lines = "".join(self._raw_metadata).splitlines()
3098                         if len(portage.auxdbkeys) != len(metadata_lines):
3099                                 # Don't trust bash's returncode if the
3100                                 # number of lines is incorrect.
3101                                 self.returncode = 1
3102                         else:
3103                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3104                                 self.metadata_callback(self.cpv, self.ebuild_path,
3105                                         self.repo_path, metadata, self.ebuild_mtime)
3106
3107 class EbuildProcess(SpawnProcess):
3108
3109         __slots__ = ("phase", "pkg", "settings", "tree")
3110
3111         def _start(self):
3112                 # Don't open the log file during the clean phase since the
3113                 # open file can result in an NFS lock on $T/build.log which
3114                 # prevents the clean phase from removing $T.
3115                 if self.phase not in ("clean", "cleanrm"):
3116                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3117                 SpawnProcess._start(self)
3118
3119         def _pipe(self, fd_pipes):
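                     # Allocate a pty when possible so that build output behaves
                     # as if attached to a terminal, copying the terminal size
                     # from the stdout pipe.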
3120                 stdout_pipe = fd_pipes.get(1)
3121                 got_pty, master_fd, slave_fd = \
3122                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3123                 return (master_fd, slave_fd)
3124
3125         def _spawn(self, args, **kwargs):
3126
3127                 root_config = self.pkg.root_config
3128                 tree = self.tree
3129                 mydbapi = root_config.trees[tree].dbapi
3130                 settings = self.settings
3131                 ebuild_path = settings["EBUILD"]
3132                 debug = settings.get("PORTAGE_DEBUG") == "1"
3133
3134                 rval = portage.doebuild(ebuild_path, self.phase,
3135                         root_config.root, settings, debug,
3136                         mydbapi=mydbapi, tree=tree, **kwargs)
3137
3138                 return rval
3139
3140         def _set_returncode(self, wait_retval):
3141                 SpawnProcess._set_returncode(self, wait_retval)
3142
3143                 if self.phase not in ("clean", "cleanrm"):
3144                         self.returncode = portage._doebuild_exit_status_check_and_log(
3145                                 self.settings, self.phase, self.returncode)
3146
3147                 if self.phase == "test" and self.returncode != os.EX_OK and \
3148                         "test-fail-continue" in self.settings.features:
3149                         self.returncode = os.EX_OK
3150
3151                 portage._post_phase_userpriv_perms(self.settings)
3152
3153 class EbuildPhase(CompositeTask):
3154
3155         __slots__ = ("background", "pkg", "phase",
3156                 "scheduler", "settings", "tree")
3157
3158         _post_phase_cmds = portage._post_phase_cmds
3159
3160         def _start(self):
3161
3162                 ebuild_process = EbuildProcess(background=self.background,
3163                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3164                         settings=self.settings, tree=self.tree)
3165
3166                 self._start_task(ebuild_process, self._ebuild_exit)
3167
3168         def _ebuild_exit(self, ebuild_process):
3169
3170                 if self.phase == "install":
3171                         out = None
3172                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3173                         log_file = None
3174                         if self.background and log_path is not None:
3175                                 log_file = open(log_path, 'a')
3176                                 out = log_file
3177                         try:
3178                                 portage._check_build_log(self.settings, out=out)
3179                         finally:
3180                                 if log_file is not None:
3181                                         log_file.close()
3182
3183                 if self._default_exit(ebuild_process) != os.EX_OK:
3184                         self.wait()
3185                         return
3186
3187                 settings = self.settings
3188
3189                 if self.phase == "install":
3190                         portage._post_src_install_uid_fix(settings)
3191
3192                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3193                 if post_phase_cmds is not None:
3194                         post_phase = MiscFunctionsProcess(background=self.background,
3195                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3196                                 scheduler=self.scheduler, settings=settings)
3197                         self._start_task(post_phase, self._post_phase_exit)
3198                         return
3199
3200                 self.returncode = ebuild_process.returncode
3201                 self._current_task = None
3202                 self.wait()
3203
3204         def _post_phase_exit(self, post_phase):
3205                 if self._final_exit(post_phase) != os.EX_OK:
3206                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3207                                 noiselevel=-1)
3208                 self._current_task = None
3209                 self.wait()
3210                 return
3211
3212 class EbuildBinpkg(EbuildProcess):
3213         """
3214         This assumes that src_install() has successfully completed.
3215         """
3216         __slots__ = ("_binpkg_tmpfile",)
3217
3218         def _start(self):
3219                 self.phase = "package"
3220                 self.tree = "porttree"
3221                 pkg = self.pkg
3222                 root_config = pkg.root_config
3223                 portdb = root_config.trees["porttree"].dbapi
3224                 bintree = root_config.trees["bintree"]
3225                 ebuild_path = portdb.findname(self.pkg.cpv)
3226                 settings = self.settings
3227                 debug = settings.get("PORTAGE_DEBUG") == "1"
3228
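                     # Build the binary package into a pid-specific temporary
                     # file; on success, _set_returncode() injects it into the
                     # bintree.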
3229                 bintree.prevent_collision(pkg.cpv)
3230                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3231                         pkg.cpv + ".tbz2." + str(os.getpid()))
3232                 self._binpkg_tmpfile = binpkg_tmpfile
3233                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3234                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3235
3236                 try:
3237                         EbuildProcess._start(self)
3238                 finally:
3239                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3240
3241         def _set_returncode(self, wait_retval):
3242                 EbuildProcess._set_returncode(self, wait_retval)
3243
3244                 pkg = self.pkg
3245                 bintree = pkg.root_config.trees["bintree"]
3246                 binpkg_tmpfile = self._binpkg_tmpfile
3247                 if self.returncode == os.EX_OK:
3248                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3249
3250 class EbuildMerge(SlotObject):
3251
3252         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3253                 "pkg", "pkg_count", "pkg_path", "pretend",
3254                 "scheduler", "settings", "tree", "world_atom")
3255
3256         def execute(self):
3257                 root_config = self.pkg.root_config
3258                 settings = self.settings
3259                 retval = portage.merge(settings["CATEGORY"],
3260                         settings["PF"], settings["D"],
3261                         os.path.join(settings["PORTAGE_BUILDDIR"],
3262                         "build-info"), root_config.root, settings,
3263                         myebuild=settings["EBUILD"],
3264                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3265                         vartree=root_config.trees["vartree"],
3266                         prev_mtimes=self.ldpath_mtimes,
3267                         scheduler=self.scheduler,
3268                         blockers=self.find_blockers)
3269
3270                 if retval == os.EX_OK:
3271                         self.world_atom(self.pkg)
3272                         self._log_success()
3273
3274                 return retval
3275
3276         def _log_success(self):
3277                 pkg = self.pkg
3278                 pkg_count = self.pkg_count
3279                 pkg_path = self.pkg_path
3280                 logger = self.logger
3281                 if "noclean" not in self.settings.features:
3282                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3283                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3284                         logger.log((" === (%s of %s) " + \
3285                                 "Post-Build Cleaning (%s::%s)") % \
3286                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3287                                 short_msg=short_msg)
3288                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3289                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3290
3291 class PackageUninstall(AsynchronousTask):
3292
3293         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3294
3295         def _start(self):
3296                 try:
3297                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3298                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3299                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3300                                 writemsg_level=self._writemsg_level)
3301                 except UninstallFailure, e:
3302                         self.returncode = e.status
3303                 else:
3304                         self.returncode = os.EX_OK
3305                 self.wait()
3306
3307         def _writemsg_level(self, msg, level=0, noiselevel=0):
3308
3309                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3310                 background = self.background
3311
3312                 if log_path is None:
3313                         if not (background and level < logging.WARNING):
3314                                 portage.util.writemsg_level(msg,
3315                                         level=level, noiselevel=noiselevel)
3316                 else:
3317                         if not background:
3318                                 portage.util.writemsg_level(msg,
3319                                         level=level, noiselevel=noiselevel)
3320
3321                         f = open(log_path, 'a')
3322                         try:
3323                                 f.write(msg)
3324                         finally:
3325                                 f.close()
3326
3327 class Binpkg(CompositeTask):
3328
3329         __slots__ = ("find_blockers",
3330                 "ldpath_mtimes", "logger", "opts",
3331                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3332                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3333                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3334
3335         def _writemsg_level(self, msg, level=0, noiselevel=0):
3336
3337                 if not self.background:
3338                         portage.util.writemsg_level(msg,
3339                                 level=level, noiselevel=noiselevel)
3340
3341                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3342                 if log_path is not None:
3343                         f = open(log_path, 'a')
3344                         try:
3345                                 f.write(msg)
3346                         finally:
3347                                 f.close()
3348
3349         def _start(self):
3350
3351                 pkg = self.pkg
3352                 settings = self.settings
3353                 settings.setcpv(pkg)
3354                 self._tree = "bintree"
3355                 self._bintree = self.pkg.root_config.trees[self._tree]
3356                 self._verify = not self.opts.pretend
3357
3358                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3359                         "portage", pkg.category, pkg.pf)
3360                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3361                         pkg=pkg, settings=settings)
3362                 self._image_dir = os.path.join(dir_path, "image")
3363                 self._infloc = os.path.join(dir_path, "build-info")
3364                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3365                 settings["EBUILD"] = self._ebuild_path
3366                 debug = settings.get("PORTAGE_DEBUG") == "1"
3367                 portage.doebuild_environment(self._ebuild_path, "setup",
3368                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3369                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3370
3371                 # The prefetcher has already completed or it
3372                 # could be running now. If it's running now,
3373                 # wait for it to complete since it holds
3374                 # a lock on the file being fetched. The
3375                 # portage.locks functions are only designed
3376                 # to work between separate processes. Since
3377                 # the lock is held by the current process,
3378                 # use the scheduler and fetcher methods to
3379                 # synchronize with the fetcher.
3380                 prefetcher = self.prefetcher
3381                 if prefetcher is None:
3382                         pass
3383                 elif not prefetcher.isAlive():
3384                         prefetcher.cancel()
3385                 elif prefetcher.poll() is None:
3386
3387                         waiting_msg = ("Fetching '%s' " + \
3388                                 "in the background. " + \
3389                                 "To view fetch progress, run `tail -f " + \
3390                                 "/var/log/emerge-fetch.log` in another " + \
3391                                 "terminal.") % prefetcher.pkg_path
3392                         msg_prefix = colorize("GOOD", " * ")
3393                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3394                                 for line in textwrap.wrap(waiting_msg, 65))
3396                         if not self.background:
3397                                 writemsg(waiting_msg, noiselevel=-1)
3398
3399                         self._current_task = prefetcher
3400                         prefetcher.addExitListener(self._prefetch_exit)
3401                         return
3402
3403                 self._prefetch_exit(prefetcher)
3404
3405         def _prefetch_exit(self, prefetcher):
3406
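                     # Unless pretending or fetching only, wipe and re-create the
                     # build directory, then fetch the binary package when
                     # --getbinpkg is enabled and the package is remote.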
3407                 pkg = self.pkg
3408                 pkg_count = self.pkg_count
3409                 if not (self.opts.pretend or self.opts.fetchonly):
3410                         self._build_dir.lock()
3411                         try:
3412                                 shutil.rmtree(self._build_dir.dir_path)
3413                         except EnvironmentError, e:
3414                                 if e.errno != errno.ENOENT:
3415                                         raise
3416                                 del e
3417                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3418                 fetcher = BinpkgFetcher(background=self.background,
3419                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3420                         pretend=self.opts.pretend, scheduler=self.scheduler)
3421                 pkg_path = fetcher.pkg_path
3422                 self._pkg_path = pkg_path
3423
3424                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3425
3426                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3427                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3428                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3429                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3430                         self.logger.log(msg, short_msg=short_msg)
3431                         self._start_task(fetcher, self._fetcher_exit)
3432                         return
3433
3434                 self._fetcher_exit(fetcher)
3435
3436         def _fetcher_exit(self, fetcher):
3437
3438                 # The fetcher only has a returncode when
3439                 # --getbinpkg is enabled.
3440                 if fetcher.returncode is not None:
3441                         self._fetched_pkg = True
3442                         if self._default_exit(fetcher) != os.EX_OK:
3443                                 self._unlock_builddir()
3444                                 self.wait()
3445                                 return
3446
3447                 if self.opts.pretend:
3448                         self._current_task = None
3449                         self.returncode = os.EX_OK
3450                         self.wait()
3451                         return
3452
3453                 verifier = None
3454                 if self._verify:
3455                         logfile = None
3456                         if self.background:
3457                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3458                         verifier = BinpkgVerifier(background=self.background,
3459                                 logfile=logfile, pkg=self.pkg)
3460                         self._start_task(verifier, self._verifier_exit)
3461                         return
3462
3463                 self._verifier_exit(verifier)
3464
3465         def _verifier_exit(self, verifier):
3466                 if verifier is not None and \
3467                         self._default_exit(verifier) != os.EX_OK:
3468                         self._unlock_builddir()
3469                         self.wait()
3470                         return
3471
3472                 logger = self.logger
3473                 pkg = self.pkg
3474                 pkg_count = self.pkg_count
3475                 pkg_path = self._pkg_path
3476
3477                 if self._fetched_pkg:
3478                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3479
3480                 if self.opts.fetchonly:
3481                         self._current_task = None
3482                         self.returncode = os.EX_OK
3483                         self.wait()
3484                         return
3485
3486                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3487                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3488                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3489                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3490                 logger.log(msg, short_msg=short_msg)
3491
3492                 phase = "clean"
3493                 settings = self.settings
3494                 ebuild_phase = EbuildPhase(background=self.background,
3495                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3496                         settings=settings, tree=self._tree)
3497
3498                 self._start_task(ebuild_phase, self._clean_exit)
3499
3500         def _clean_exit(self, clean_phase):
3501                 if self._default_exit(clean_phase) != os.EX_OK:
3502                         self._unlock_builddir()
3503                         self.wait()
3504                         return
3505
3506                 dir_path = self._build_dir.dir_path
3507
3508                 try:
3509                         shutil.rmtree(dir_path)
3510                 except (IOError, OSError), e:
3511                         if e.errno != errno.ENOENT:
3512                                 raise
3513                         del e
3514
3515                 infloc = self._infloc
3516                 pkg = self.pkg
3517                 pkg_path = self._pkg_path
3518
3519                 dir_mode = 0755
3520                 for mydir in (dir_path, self._image_dir, infloc):
3521                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3522                                 gid=portage.data.portage_gid, mode=dir_mode)
3523
3524                 # This initializes PORTAGE_LOG_FILE.
3525                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3526                 self._writemsg_level(">>> Extracting info\n")
3527
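                     # Extract the xpak metadata from the .tbz2 into build-info,
                     # filling in CATEGORY and PF from the Package object if the
                     # binary package lacks them.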
3528                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3529                 check_missing_metadata = ("CATEGORY", "PF")
3530                 missing_metadata = set()
3531                 for k in check_missing_metadata:
3532                         v = pkg_xpak.getfile(k)
3533                         if not v:
3534                                 missing_metadata.add(k)
3535
3536                 pkg_xpak.unpackinfo(infloc)
3537                 for k in missing_metadata:
3538                         if k == "CATEGORY":
3539                                 v = pkg.category
3540                         elif k == "PF":
3541                                 v = pkg.pf
3542                         else:
3543                                 continue
3544
3545                         f = open(os.path.join(infloc, k), 'wb')
3546                         try:
3547                                 f.write(v + "\n")
3548                         finally:
3549                                 f.close()
3550
3551                 # Store the md5sum in the vdb.
3552                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3553                 try:
3554                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3555                 finally:
3556                         f.close()
3557
3558                 # This gives bashrc users an opportunity to do various things
3559                 # such as remove binary packages after they're installed.
3560                 settings = self.settings
3561                 settings.setcpv(self.pkg)
3562                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3563                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3564
3565                 phase = "setup"
3566                 setup_phase = EbuildPhase(background=self.background,
3567                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3568                         settings=settings, tree=self._tree)
3569
3570                 setup_phase.addExitListener(self._setup_exit)
3571                 self._current_task = setup_phase
3572                 self.scheduler.scheduleSetup(setup_phase)
3573
3574         def _setup_exit(self, setup_phase):
3575                 if self._default_exit(setup_phase) != os.EX_OK:
3576                         self._unlock_builddir()
3577                         self.wait()
3578                         return
3579
3580                 extractor = BinpkgExtractorAsync(background=self.background,
3581                         image_dir=self._image_dir,
3582                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3583                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3584                 self._start_task(extractor, self._extractor_exit)
3585
3586         def _extractor_exit(self, extractor):
3587                 if self._final_exit(extractor) != os.EX_OK:
3588                         self._unlock_builddir()
3589                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3590                                 noiselevel=-1)
3591                 self.wait()
3592
3593         def _unlock_builddir(self):
3594                 if self.opts.pretend or self.opts.fetchonly:
3595                         return
3596                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3597                 self._build_dir.unlock()
3598
3599         def install(self):
3600
3601                 # This gives bashrc users an opportunity to do various things
3602                 # such as remove binary packages after they're installed.
3603                 settings = self.settings
3604                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3605                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3606
3607                 merge = EbuildMerge(find_blockers=self.find_blockers,
3608                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3609                         pkg=self.pkg, pkg_count=self.pkg_count,
3610                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3611                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3612
3613                 try:
3614                         retval = merge.execute()
3615                 finally:
3616                         settings.pop("PORTAGE_BINPKG_FILE", None)
3617                         self._unlock_builddir()
3618                 return retval
3619
3620 class BinpkgFetcher(SpawnProcess):
3621
3622         __slots__ = ("pkg", "pretend",
3623                 "locked", "pkg_path", "_lock_obj")
3624
3625         def __init__(self, **kwargs):
3626                 SpawnProcess.__init__(self, **kwargs)
3627                 pkg = self.pkg
3628                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3629
3630         def _start(self):
3631
3632                 if self.cancelled:
3633                         return
3634
3635                 pkg = self.pkg
3636                 pretend = self.pretend
3637                 bintree = pkg.root_config.trees["bintree"]
3638                 settings = bintree.settings
3639                 use_locks = "distlocks" in settings.features
3640                 pkg_path = self.pkg_path
3641
3642                 if not pretend:
3643                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3644                         if use_locks:
3645                                 self.lock()
3646                 exists = os.path.exists(pkg_path)
3647                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3648                 if not (pretend or resume):
3649                         # Remove existing file or broken symlink.
3650                         try:
3651                                 os.unlink(pkg_path)
3652                         except OSError:
3653                                 pass
3654
3655                 # urljoin doesn't work correctly with
3656                 # unrecognized protocols like sftp
3657                 if bintree._remote_has_index:
3658                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3659                         if not rel_uri:
3660                                 rel_uri = pkg.cpv + ".tbz2"
3661                         uri = bintree._remote_base_uri.rstrip("/") + \
3662                                 "/" + rel_uri.lstrip("/")
3663                 else:
3664                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3665                                 "/" + pkg.pf + ".tbz2"
3666
3667                 if pretend:
3668                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3669                         self.returncode = os.EX_OK
3670                         self.wait()
3671                         return
3672
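		# Prefer a protocol-specific FETCHCOMMAND_<PROTOCOL> or
		# RESUMECOMMAND_<PROTOCOL> setting when one is defined, and fall
		# back to the generic FETCHCOMMAND/RESUMECOMMAND otherwise.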
3673                 protocol = urlparse.urlparse(uri)[0]
3674                 fcmd_prefix = "FETCHCOMMAND"
3675                 if resume:
3676                         fcmd_prefix = "RESUMECOMMAND"
3677                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3678                 if not fcmd:
3679                         fcmd = settings.get(fcmd_prefix)
3680
3681                 fcmd_vars = {
3682                         "DISTDIR" : os.path.dirname(pkg_path),
3683                         "URI"     : uri,
3684                         "FILE"    : os.path.basename(pkg_path)
3685                 }
3686
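		# Give the fetcher the full config environment and expand the
		# DISTDIR, URI and FILE placeholders in the configured command.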
3687                 fetch_env = dict(settings.iteritems())
3688                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3689                         for x in shlex.split(fcmd)]
3690
3691                 if self.fd_pipes is None:
3692                         self.fd_pipes = {}
3693                 fd_pipes = self.fd_pipes
3694
3695                 # Redirect all output to stdout since some fetchers like
3696                 # wget pollute stderr (if portage detects a problem then it
3697                 # can send its own message to stderr).
3698                 fd_pipes.setdefault(0, sys.stdin.fileno())
3699                 fd_pipes.setdefault(1, sys.stdout.fileno())
3700                 fd_pipes.setdefault(2, sys.stdout.fileno())
3701
3702                 self.args = fetch_args
3703                 self.env = fetch_env
3704                 SpawnProcess._start(self)
3705
3706         def _set_returncode(self, wait_retval):
3707                 SpawnProcess._set_returncode(self, wait_retval)
3708                 if self.returncode == os.EX_OK:
3709                         # If possible, update the mtime to match the remote package if
3710                         # the fetcher didn't already do it automatically.
3711                         bintree = self.pkg.root_config.trees["bintree"]
3712                         if bintree._remote_has_index:
3713                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3714                                 if remote_mtime is not None:
3715                                         try:
3716                                                 remote_mtime = long(remote_mtime)
3717                                         except ValueError:
3718                                                 pass
3719                                         else:
3720                                                 try:
3721                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3722                                                 except OSError:
3723                                                         pass
3724                                                 else:
3725                                                         if remote_mtime != local_mtime:
3726                                                                 try:
3727                                                                         os.utime(self.pkg_path,
3728                                                                                 (remote_mtime, remote_mtime))
3729                                                                 except OSError:
3730                                                                         pass
3731
3732                 if self.locked:
3733                         self.unlock()
3734
3735         def lock(self):
3736                 """
3737                 This raises an AlreadyLocked exception if lock() is called
3738                 while a lock is already held. In order to avoid this, call
3739                 unlock() or check whether the "locked" attribute is True
3740                 or False before calling lock().
3741                 """
3742                 if self._lock_obj is not None:
3743                         raise self.AlreadyLocked((self._lock_obj,))
3744
3745                 self._lock_obj = portage.locks.lockfile(
3746                         self.pkg_path, wantnewlockfile=1)
3747                 self.locked = True
3748
3749         class AlreadyLocked(portage.exception.PortageException):
3750                 pass
3751
3752         def unlock(self):
3753                 if self._lock_obj is None:
3754                         return
3755                 portage.locks.unlockfile(self._lock_obj)
3756                 self._lock_obj = None
3757                 self.locked = False
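	# Illustrative usage sketch ("fetcher" is a hypothetical BinpkgFetcher
	# instance, not a name used elsewhere in this module): check the
	# "locked" attribute before calling lock() to avoid AlreadyLocked, and
	# release the lock in a finally block:
	#
	#	if not fetcher.locked:
	#		fetcher.lock()
	#	try:
	#		pass  # work with fetcher.pkg_path
	#	finally:
	#		fetcher.unlock()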
3758
3759 class BinpkgVerifier(AsynchronousTask):
3760         __slots__ = ("logfile", "pkg",)
3761
3762         def _start(self):
3763                 """
3764                 Note: Unlike a normal AsynchronousTask.start() method,
3765                 this one does all of its work synchronously. The returncode
3766                 attribute will be set before it returns.
3767                 """
3768
3769                 pkg = self.pkg
3770                 root_config = pkg.root_config
3771                 bintree = root_config.trees["bintree"]
3772                 rval = os.EX_OK
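		# When running in the background with a log file, temporarily
		# redirect stdout/stderr so that any digest check output is
		# captured in the build log instead of the terminal.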
3773                 stdout_orig = sys.stdout
3774                 stderr_orig = sys.stderr
3775                 log_file = None
3776                 if self.background and self.logfile is not None:
3777                         log_file = open(self.logfile, 'a')
3778                 try:
3779                         if log_file is not None:
3780                                 sys.stdout = log_file
3781                                 sys.stderr = log_file
3782                         try:
3783                                 bintree.digestCheck(pkg)
3784                         except portage.exception.FileNotFound:
3785                                 writemsg("!!! Fetching Binary failed " + \
3786                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3787                                 rval = 1
3788                         except portage.exception.DigestException, e:
3789                                 writemsg("\n!!! Digest verification failed:\n",
3790                                         noiselevel=-1)
3791                                 writemsg("!!! %s\n" % e.value[0],
3792                                         noiselevel=-1)
3793                                 writemsg("!!! Reason: %s\n" % e.value[1],
3794                                         noiselevel=-1)
3795                                 writemsg("!!! Got: %s\n" % e.value[2],
3796                                         noiselevel=-1)
3797                                 writemsg("!!! Expected: %s\n" % e.value[3],
3798                                         noiselevel=-1)
3799                                 rval = 1
3800                         if rval != os.EX_OK:
3801                                 pkg_path = bintree.getname(pkg.cpv)
3802                                 head, tail = os.path.split(pkg_path)
3803                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3804                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3805                                         noiselevel=-1)
3806                 finally:
3807                         sys.stdout = stdout_orig
3808                         sys.stderr = stderr_orig
3809                         if log_file is not None:
3810                                 log_file.close()
3811
3812                 self.returncode = rval
3813                 self.wait()
3814
3815 class BinpkgPrefetcher(CompositeTask):
3816
3817         __slots__ = ("pkg",) + \
3818                 ("pkg_path", "_bintree",)
3819
3820         def _start(self):
3821                 self._bintree = self.pkg.root_config.trees["bintree"]
3822                 fetcher = BinpkgFetcher(background=self.background,
3823                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3824                         scheduler=self.scheduler)
3825                 self.pkg_path = fetcher.pkg_path
3826                 self._start_task(fetcher, self._fetcher_exit)
3827
3828         def _fetcher_exit(self, fetcher):
3829
3830                 if self._default_exit(fetcher) != os.EX_OK:
3831                         self.wait()
3832                         return
3833
3834                 verifier = BinpkgVerifier(background=self.background,
3835                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3836                 self._start_task(verifier, self._verifier_exit)
3837
3838         def _verifier_exit(self, verifier):
3839                 if self._default_exit(verifier) != os.EX_OK:
3840                         self.wait()
3841                         return
3842
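		# The package has been fetched and verified, so it can now be
		# registered in the binary tree.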
3843                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3844
3845                 self._current_task = None
3846                 self.returncode = os.EX_OK
3847                 self.wait()
3848
3849 class BinpkgExtractorAsync(SpawnProcess):
3850
3851         __slots__ = ("image_dir", "pkg", "pkg_path")
3852
3853         _shell_binary = portage.const.BASH_BINARY
3854
3855         def _start(self):
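		# Decompress the binary package with bzip2 and pipe it into tar,
		# which extracts the contents (preserving permissions) into the
		# temporary image directory.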
3856                 self.args = [self._shell_binary, "-c",
3857                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3858                         (portage._shell_quote(self.pkg_path),
3859                         portage._shell_quote(self.image_dir))]
3860
3861                 self.env = self.pkg.root_config.settings.environ()
3862                 SpawnProcess._start(self)
3863
3864 class MergeListItem(CompositeTask):
3865
3866         """
3867         TODO: For parallel scheduling, everything here needs asynchronous
3868         execution support (start, poll, and wait methods).
3869         """
3870
3871         __slots__ = ("args_set",
3872                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3873                 "find_blockers", "logger", "mtimedb", "pkg",
3874                 "pkg_count", "pkg_to_replace", "prefetcher",
3875                 "settings", "statusMessage", "world_atom") + \
3876                 ("_install_task",)
3877
3878         def _start(self):
3879
3880                 pkg = self.pkg
3881                 build_opts = self.build_opts
3882
3883                 if pkg.installed:
3884                         # uninstall, executed by self.merge()
3885                         self.returncode = os.EX_OK
3886                         self.wait()
3887                         return
3888
3889                 args_set = self.args_set
3890                 find_blockers = self.find_blockers
3891                 logger = self.logger
3892                 mtimedb = self.mtimedb
3893                 pkg_count = self.pkg_count
3894                 scheduler = self.scheduler
3895                 settings = self.settings
3896                 world_atom = self.world_atom
3897                 ldpath_mtimes = mtimedb["ldpath"]
3898
3899                 action_desc = "Emerging"
3900                 preposition = "for"
3901                 if pkg.type_name == "binary":
3902                         action_desc += " binary"
3903
3904                 if build_opts.fetchonly:
3905                         action_desc = "Fetching"
3906
3907                 msg = "%s (%s of %s) %s" % \
3908                         (action_desc,
3909                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3910                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3911                         colorize("GOOD", pkg.cpv))
3912
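		# Mention the source repository in the status message when the
		# package does not come from the primary PORTDIR repository.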
3913                 portdb = pkg.root_config.trees["porttree"].dbapi
3914                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3915                 if portdir_repo_name:
3916                         pkg_repo_name = pkg.metadata.get("repository")
3917                         if pkg_repo_name != portdir_repo_name:
3918                                 if not pkg_repo_name:
3919                                         pkg_repo_name = "unknown repo"
3920                                 msg += " from %s" % pkg_repo_name
3921
3922                 if pkg.root != "/":
3923                         msg += " %s %s" % (preposition, pkg.root)
3924
3925                 if not build_opts.pretend:
3926                         self.statusMessage(msg)
3927                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3928                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3929
3930                 if pkg.type_name == "ebuild":
3931
3932                         build = EbuildBuild(args_set=args_set,
3933                                 background=self.background,
3934                                 config_pool=self.config_pool,
3935                                 find_blockers=find_blockers,
3936                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3937                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3938                                 prefetcher=self.prefetcher, scheduler=scheduler,
3939                                 settings=settings, world_atom=world_atom)
3940
3941                         self._install_task = build
3942                         self._start_task(build, self._default_final_exit)
3943                         return
3944
3945                 elif pkg.type_name == "binary":
3946
3947                         binpkg = Binpkg(background=self.background,
3948                                 find_blockers=find_blockers,
3949                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3950                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3951                                 prefetcher=self.prefetcher, settings=settings,
3952                                 scheduler=scheduler, world_atom=world_atom)
3953
3954                         self._install_task = binpkg
3955                         self._start_task(binpkg, self._default_final_exit)
3956                         return
3957
3958         def _poll(self):
3959                 self._install_task.poll()
3960                 return self.returncode
3961
3962         def _wait(self):
3963                 self._install_task.wait()
3964                 return self.returncode
3965
3966         def merge(self):
3967
3968                 pkg = self.pkg
3969                 build_opts = self.build_opts
3970                 find_blockers = self.find_blockers
3971                 logger = self.logger
3972                 mtimedb = self.mtimedb
3973                 pkg_count = self.pkg_count
3974                 prefetcher = self.prefetcher
3975                 scheduler = self.scheduler
3976                 settings = self.settings
3977                 world_atom = self.world_atom
3978                 ldpath_mtimes = mtimedb["ldpath"]
3979
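		# For installed packages, merge() performs the uninstall (skipped
		# for pretend, fetchonly and buildpkgonly runs); for everything
		# else the install is delegated to the task started in _start().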
3980                 if pkg.installed:
3981                         if not (build_opts.buildpkgonly or \
3982                                 build_opts.fetchonly or build_opts.pretend):
3983
3984                                 uninstall = PackageUninstall(background=self.background,
3985                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3986                                         pkg=pkg, scheduler=scheduler, settings=settings)
3987
3988                                 uninstall.start()
3989                                 retval = uninstall.wait()
3990                                 if retval != os.EX_OK:
3991                                         return retval
3992                         return os.EX_OK
3993
3994                 if build_opts.fetchonly or \
3995                         build_opts.buildpkgonly:
3996                         return self.returncode
3997
3998                 retval = self._install_task.install()
3999                 return retval
4000
4001 class PackageMerge(AsynchronousTask):
4002         """
4003         TODO: Implement asynchronous merge so that the scheduler can
4004         run while a merge is executing.
4005         """
4006
4007         __slots__ = ("merge",)
4008
4009         def _start(self):
4010
4011                 pkg = self.merge.pkg
4012                 pkg_count = self.merge.pkg_count
4013
4014                 if pkg.installed:
4015                         action_desc = "Uninstalling"
4016                         preposition = "from"
4017                 else:
4018                         action_desc = "Installing"
4019                         preposition = "to"
4020
4021                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4022
4023                 if pkg.root != "/":
4024                         msg += " %s %s" % (preposition, pkg.root)
4025
4026                 if not self.merge.build_opts.fetchonly and \
4027                         not self.merge.build_opts.pretend and \
4028                         not self.merge.build_opts.buildpkgonly:
4029                         self.merge.statusMessage(msg)
4030
4031                 self.returncode = self.merge.merge()
4032                 self.wait()
4033
4034 class DependencyArg(object):
4035         def __init__(self, arg=None, root_config=None):
4036                 self.arg = arg
4037                 self.root_config = root_config
4038
4039         def __str__(self):
4040                 return str(self.arg)
4041
4042 class AtomArg(DependencyArg):
4043         def __init__(self, atom=None, **kwargs):
4044                 DependencyArg.__init__(self, **kwargs)
4045                 self.atom = atom
4046                 if not isinstance(self.atom, portage.dep.Atom):
4047                         self.atom = portage.dep.Atom(self.atom)
4048                 self.set = (self.atom, )
4049
4050 class PackageArg(DependencyArg):
4051         def __init__(self, package=None, **kwargs):
4052                 DependencyArg.__init__(self, **kwargs)
4053                 self.package = package
4054                 self.atom = portage.dep.Atom("=" + package.cpv)
4055                 self.set = (self.atom, )
4056
4057 class SetArg(DependencyArg):
4058         def __init__(self, set=None, **kwargs):
4059                 DependencyArg.__init__(self, **kwargs)
4060                 self.set = set
4061                 self.name = self.arg[len(SETPREFIX):]
4062
4063 class Dependency(SlotObject):
4064         __slots__ = ("atom", "blocker", "depth",
4065                 "parent", "onlydeps", "priority", "root")
4066         def __init__(self, **kwargs):
4067                 SlotObject.__init__(self, **kwargs)
4068                 if self.priority is None:
4069                         self.priority = DepPriority()
4070                 if self.depth is None:
4071                         self.depth = 0
4072
4073 class BlockerCache(portage.cache.mappings.MutableMapping):
4074         """This caches blockers of installed packages so that dep_check does not
4075         have to be done for every single installed package on every invocation of
4076         emerge.  The cache is invalidated whenever it is detected that something
4077         has changed that might alter the results of dep_check() calls:
4078                 1) the set of installed packages (including COUNTER) has changed
4079                 2) the old-style virtuals have changed
4080         """
4081
4082         # Number of uncached packages to trigger cache update, since
4083         # it's wasteful to update it for every vdb change.
4084         _cache_threshold = 5
4085
4086         class BlockerData(object):
4087
4088                 __slots__ = ("__weakref__", "atoms", "counter")
4089
4090                 def __init__(self, counter, atoms):
4091                         self.counter = counter
4092                         self.atoms = atoms
4093
4094         def __init__(self, myroot, vardb):
4095                 self._vardb = vardb
4096                 self._virtuals = vardb.settings.getvirtuals()
4097                 self._cache_filename = os.path.join(myroot,
4098                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4099                 self._cache_version = "1"
4100                 self._cache_data = None
4101                 self._modified = set()
4102                 self._load()
4103
4104         def _load(self):
4105                 try:
4106                         f = open(self._cache_filename, mode='rb')
4107                         mypickle = pickle.Unpickler(f)
4108                         self._cache_data = mypickle.load()
4109                         f.close()
4110                         del f
4111                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4112                         if isinstance(e, pickle.UnpicklingError):
4113                                 writemsg("!!! Error loading '%s': %s\n" % \
4114                                         (self._cache_filename, str(e)), noiselevel=-1)
4115                         del e
4116
4117                 cache_valid = self._cache_data and \
4118                         isinstance(self._cache_data, dict) and \
4119                         self._cache_data.get("version") == self._cache_version and \
4120                         isinstance(self._cache_data.get("blockers"), dict)
4121                 if cache_valid:
4122                         # Validate all the atoms and counters so that
4123                         # corruption is detected as soon as possible.
4124                         invalid_items = set()
4125                         for k, v in self._cache_data["blockers"].iteritems():
4126                                 if not isinstance(k, basestring):
4127                                         invalid_items.add(k)
4128                                         continue
4129                                 try:
4130                                         if portage.catpkgsplit(k) is None:
4131                                                 invalid_items.add(k)
4132                                                 continue
4133                                 except portage.exception.InvalidData:
4134                                         invalid_items.add(k)
4135                                         continue
4136                                 if not isinstance(v, tuple) or \
4137                                         len(v) != 2:
4138                                         invalid_items.add(k)
4139                                         continue
4140                                 counter, atoms = v
4141                                 if not isinstance(counter, (int, long)):
4142                                         invalid_items.add(k)
4143                                         continue
4144                                 if not isinstance(atoms, (list, tuple)):
4145                                         invalid_items.add(k)
4146                                         continue
4147                                 invalid_atom = False
4148                                 for atom in atoms:
4149                                         if not isinstance(atom, basestring):
4150                                                 invalid_atom = True
4151                                                 break
4152                                         if atom[:1] != "!" or \
4153                                                 not portage.isvalidatom(
4154                                                 atom, allow_blockers=True):
4155                                                 invalid_atom = True
4156                                                 break
4157                                 if invalid_atom:
4158                                         invalid_items.add(k)
4159                                         continue
4160
4161                         for k in invalid_items:
4162                                 del self._cache_data["blockers"][k]
4163                         if not self._cache_data["blockers"]:
4164                                 cache_valid = False
4165
4166                 if not cache_valid:
4167                         self._cache_data = {"version":self._cache_version}
4168                         self._cache_data["blockers"] = {}
4169                         self._cache_data["virtuals"] = self._virtuals
4170                 self._modified.clear()
4171
4172         def flush(self):
4173                 """If the current user has permission and the internal blocker cache
4174                 has been updated, save it to disk and mark it unmodified.  This is called
4175                 by emerge after it has processed blockers for all installed packages.
4176                 Currently, the cache is only written if the user has superuser
4177                 privileges (since that's required to obtain a lock), but all users
4178                 have read access and benefit from faster blocker lookups (as long as
4179                 the entire cache is still valid).  The cache is stored as a pickled
4180                 dict object with the following format:
4181
4182                 {
4183                         version : "1",
4184                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4185                         "virtuals" : vardb.settings.getvirtuals()
4186                 }
4187                 """
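		# Only write the cache when enough entries have changed to make it
		# worthwhile and the process has superuser privileges (secpass >= 2).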
4188                 if len(self._modified) >= self._cache_threshold and \
4189                         secpass >= 2:
4190                         try:
4191                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4192                                 pickle.dump(self._cache_data, f, -1)
4193                                 f.close()
4194                                 portage.util.apply_secpass_permissions(
4195                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4196                         except (IOError, OSError), e:
4197                                 pass
4198                         self._modified.clear()
4199
4200         def __setitem__(self, cpv, blocker_data):
4201                 """
4202                 Update the cache and mark it as modified for a future call to
4203                 self.flush().
4204
4205                 @param cpv: Package for which to cache blockers.
4206                 @type cpv: String
4207                 @param blocker_data: An object with counter and atoms attributes.
4208                 @type blocker_data: BlockerData
4209                 """
4210                 self._cache_data["blockers"][cpv] = \
4211                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4212                 self._modified.add(cpv)
4213
4214         def __iter__(self):
4215                 if self._cache_data is None:
4216                         # triggered by python-trace
4217                         return iter([])
4218                 return iter(self._cache_data["blockers"])
4219
4220         def __delitem__(self, cpv):
4221                 del self._cache_data["blockers"][cpv]
4222
4223         def __getitem__(self, cpv):
4224                 """
4225                 @rtype: BlockerData
4226                 @returns: An object with counter and atoms attributes.
4227                 """
4228                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4229
4230 class BlockerDB(object):
4231
4232         def __init__(self, root_config):
4233                 self._root_config = root_config
4234                 self._vartree = root_config.trees["vartree"]
4235                 self._portdb = root_config.trees["porttree"].dbapi
4236
4237                 self._dep_check_trees = None
4238                 self._fake_vartree = None
4239
4240         def _get_fake_vartree(self, acquire_lock=0):
4241                 fake_vartree = self._fake_vartree
4242                 if fake_vartree is None:
4243                         fake_vartree = FakeVartree(self._root_config,
4244                                 acquire_lock=acquire_lock)
4245                         self._fake_vartree = fake_vartree
4246                         self._dep_check_trees = { self._vartree.root : {
4247                                 "porttree"    :  fake_vartree,
4248                                 "vartree"     :  fake_vartree,
4249                         }}
4250                 else:
4251                         fake_vartree.sync(acquire_lock=acquire_lock)
4252                 return fake_vartree
4253
4254         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
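		# Return the set of installed packages that block new_pkg, checking
		# both directions: blocker atoms declared by installed packages that
		# match new_pkg, and blocker atoms declared by new_pkg that match
		# installed packages.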
4255                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4256                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4257                 settings = self._vartree.settings
4258                 stale_cache = set(blocker_cache)
4259                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4260                 dep_check_trees = self._dep_check_trees
4261                 vardb = fake_vartree.dbapi
4262                 installed_pkgs = list(vardb)
4263
4264                 for inst_pkg in installed_pkgs:
4265                         stale_cache.discard(inst_pkg.cpv)
4266                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4267                         if cached_blockers is not None and \
4268                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4269                                 cached_blockers = None
4270                         if cached_blockers is not None:
4271                                 blocker_atoms = cached_blockers.atoms
4272                         else:
4273                                 # Use aux_get() to trigger FakeVartree global
4274                                 # updates on *DEPEND when appropriate.
4275                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4276                                 try:
4277                                         portage.dep._dep_check_strict = False
4278                                         success, atoms = portage.dep_check(depstr,
4279                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4280                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4281                                 finally:
4282                                         portage.dep._dep_check_strict = True
4283                                 if not success:
4284                                         pkg_location = os.path.join(inst_pkg.root,
4285                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4286                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4287                                                 (pkg_location, atoms), noiselevel=-1)
4288                                         continue
4289
4290                                 blocker_atoms = [atom for atom in atoms \
4291                                         if atom.startswith("!")]
4292                                 blocker_atoms.sort()
4293                                 counter = long(inst_pkg.metadata["COUNTER"])
4294                                 blocker_cache[inst_pkg.cpv] = \
4295                                         blocker_cache.BlockerData(counter, blocker_atoms)
4296                 for cpv in stale_cache:
4297                         del blocker_cache[cpv]
4298                 blocker_cache.flush()
4299
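		# Map each blocker atom back to the installed packages that declare
		# it, so that the blocking packages can be looked up once a matching
		# atom is found.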
4300                 blocker_parents = digraph()
4301                 blocker_atoms = []
4302                 for pkg in installed_pkgs:
4303                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4304                                 blocker_atom = blocker_atom.lstrip("!")
4305                                 blocker_atoms.append(blocker_atom)
4306                                 blocker_parents.add(blocker_atom, pkg)
4307
4308                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4309                 blocking_pkgs = set()
4310                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4311                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4312
4313                 # Check for blockers in the other direction.
4314                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4315                 try:
4316                         portage.dep._dep_check_strict = False
4317                         success, atoms = portage.dep_check(depstr,
4318                                 vardb, settings, myuse=new_pkg.use.enabled,
4319                                 trees=dep_check_trees, myroot=new_pkg.root)
4320                 finally:
4321                         portage.dep._dep_check_strict = True
4322                 if not success:
4323                         # We should never get this far with invalid deps.
4324                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4325                         assert False
4326
4327                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4328                         if atom[:1] == "!"]
4329                 if blocker_atoms:
4330                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4331                         for inst_pkg in installed_pkgs:
4332                                 try:
4333                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4334                                 except (portage.exception.InvalidDependString, StopIteration):
4335                                         continue
4336                                 blocking_pkgs.add(inst_pkg)
4337
4338                 return blocking_pkgs
4339
4340 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4341
4342         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4343                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4344         p_type, p_root, p_key, p_status = parent_node
4345         msg = []
4346         if p_status == "nomerge":
4347                 category, pf = portage.catsplit(p_key)
4348                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4349                 msg.append("Portage is unable to process the dependencies of the ")
4350                 msg.append("'%s' package. " % p_key)
4351                 msg.append("In order to correct this problem, the package ")
4352                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4353                 msg.append("As a temporary workaround, the --nodeps option can ")
4354                 msg.append("be used to ignore all dependencies.  For reference, ")
4355                 msg.append("the problematic dependencies can be found in the ")
4356                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4357         else:
4358                 msg.append("This package can not be installed. ")
4359                 msg.append("Please notify the '%s' package maintainer " % p_key)
4360                 msg.append("about this problem.")
4361
4362         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4363         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4364
4365 class PackageVirtualDbapi(portage.dbapi):
4366         """
4367         A dbapi-like interface class that represents the state of the installed
4368         package database as new packages are installed, replacing any packages
4369         that previously existed in the same slot. The main difference between
4370         this class and fakedbapi is that this one uses Package instances
4371         internally (passed in via cpv_inject() and cpv_remove() calls).
4372         """
4373         def __init__(self, settings):
4374                 portage.dbapi.__init__(self)
4375                 self.settings = settings
4376                 self._match_cache = {}
4377                 self._cp_map = {}
4378                 self._cpv_map = {}
4379
4380         def clear(self):
4381                 """
4382                 Remove all packages.
4383                 """
4384                 if self._cpv_map:
4385                         self._clear_cache()
4386                         self._cp_map.clear()
4387                         self._cpv_map.clear()
4388
4389         def copy(self):
4390                 obj = PackageVirtualDbapi(self.settings)
4391                 obj._match_cache = self._match_cache.copy()
4392                 obj._cp_map = self._cp_map.copy()
4393                 for k, v in obj._cp_map.iteritems():
4394                         obj._cp_map[k] = v[:]
4395                 obj._cpv_map = self._cpv_map.copy()
4396                 return obj
4397
4398         def __iter__(self):
4399                 return self._cpv_map.itervalues()
4400
4401         def __contains__(self, item):
4402                 existing = self._cpv_map.get(item.cpv)
4403                 if existing is not None and \
4404                         existing == item:
4405                         return True
4406                 return False
4407
4408         def get(self, item, default=None):
4409                 cpv = getattr(item, "cpv", None)
4410                 if cpv is None:
4411                         if len(item) != 4:
4412                                 return default
4413                         type_name, root, cpv, operation = item
4414
4415                 existing = self._cpv_map.get(cpv)
4416                 if existing is not None and \
4417                         existing == item:
4418                         return existing
4419                 return default
4420
4421         def match_pkgs(self, atom):
4422                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4423
4424         def _clear_cache(self):
4425                 if self._categories is not None:
4426                         self._categories = None
4427                 if self._match_cache:
4428                         self._match_cache = {}
4429
4430         def match(self, origdep, use_cache=1):
4431                 result = self._match_cache.get(origdep)
4432                 if result is not None:
4433                         return result[:]
4434                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4435                 self._match_cache[origdep] = result
4436                 return result[:]
4437
4438         def cpv_exists(self, cpv):
4439                 return cpv in self._cpv_map
4440
4441         def cp_list(self, mycp, use_cache=1):
4442                 cachelist = self._match_cache.get(mycp)
4443                 # cp_list() doesn't expand old-style virtuals
4444                 if cachelist and cachelist[0].startswith(mycp):
4445                         return cachelist[:]
4446                 cpv_list = self._cp_map.get(mycp)
4447                 if cpv_list is None:
4448                         cpv_list = []
4449                 else:
4450                         cpv_list = [pkg.cpv for pkg in cpv_list]
4451                 self._cpv_sort_ascending(cpv_list)
4452                 if not (not cpv_list and mycp.startswith("virtual/")):
4453                         self._match_cache[mycp] = cpv_list
4454                 return cpv_list[:]
4455
4456         def cp_all(self):
4457                 return list(self._cp_map)
4458
4459         def cpv_all(self):
4460                 return list(self._cpv_map)
4461
4462         def cpv_inject(self, pkg):
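		# Add pkg, displacing any existing package that has the same cpv or
		# occupies the same slot.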
4463                 cp_list = self._cp_map.get(pkg.cp)
4464                 if cp_list is None:
4465                         cp_list = []
4466                         self._cp_map[pkg.cp] = cp_list
4467                 e_pkg = self._cpv_map.get(pkg.cpv)
4468                 if e_pkg is not None:
4469                         if e_pkg == pkg:
4470                                 return
4471                         self.cpv_remove(e_pkg)
4472                 for e_pkg in cp_list:
4473                         if e_pkg.slot_atom == pkg.slot_atom:
4474                                 if e_pkg == pkg:
4475                                         return
4476                                 self.cpv_remove(e_pkg)
4477                                 break
4478                 cp_list.append(pkg)
4479                 self._cpv_map[pkg.cpv] = pkg
4480                 self._clear_cache()
4481
4482         def cpv_remove(self, pkg):
4483                 old_pkg = self._cpv_map.get(pkg.cpv)
4484                 if old_pkg != pkg:
4485                         raise KeyError(pkg)
4486                 self._cp_map[pkg.cp].remove(pkg)
4487                 del self._cpv_map[pkg.cpv]
4488                 self._clear_cache()
4489
4490         def aux_get(self, cpv, wants):
4491                 metadata = self._cpv_map[cpv].metadata
4492                 return [metadata.get(x, "") for x in wants]
4493
4494         def aux_update(self, cpv, values):
4495                 self._cpv_map[cpv].metadata.update(values)
4496                 self._clear_cache()
4497
4498 class depgraph(object):
4499
4500         pkg_tree_map = RootConfig.pkg_tree_map
4501
4502         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4503
4504         def __init__(self, settings, trees, myopts, myparams, spinner):
4505                 self.settings = settings
4506                 self.target_root = settings["ROOT"]
4507                 self.myopts = myopts
4508                 self.myparams = myparams
4509                 self.edebug = 0
4510                 if settings.get("PORTAGE_DEBUG", "") == "1":
4511                         self.edebug = 1
4512                 self.spinner = spinner
4513                 self._running_root = trees["/"]["root_config"]
4514                 self._opts_no_restart = Scheduler._opts_no_restart
4515                 self.pkgsettings = {}
4516                 # Maps slot atom to package for each Package added to the graph.
4517                 self._slot_pkg_map = {}
4518                 # Maps nodes to the reasons they were selected for reinstallation.
4519                 self._reinstall_nodes = {}
4520                 self.mydbapi = {}
4521                 self.trees = {}
4522                 self._trees_orig = trees
4523                 self.roots = {}
4524                 # Contains a filtered view of preferred packages that are selected
4525                 # from available repositories.
4526                 self._filtered_trees = {}
4527                 # Contains installed packages and new packages that have been added
4528                 # to the graph.
4529                 self._graph_trees = {}
4530                 # All Package instances
4531                 self._pkg_cache = {}
4532                 for myroot in trees:
4533                         self.trees[myroot] = {}
4534                         # Create a RootConfig instance that references
4535                         # the FakeVartree instead of the real one.
4536                         self.roots[myroot] = RootConfig(
4537                                 trees[myroot]["vartree"].settings,
4538                                 self.trees[myroot],
4539                                 trees[myroot]["root_config"].setconfig)
4540                         for tree in ("porttree", "bintree"):
4541                                 self.trees[myroot][tree] = trees[myroot][tree]
4542                         self.trees[myroot]["vartree"] = \
4543                                 FakeVartree(trees[myroot]["root_config"],
4544                                         pkg_cache=self._pkg_cache)
4545                         self.pkgsettings[myroot] = portage.config(
4546                                 clone=self.trees[myroot]["vartree"].settings)
4547                         self._slot_pkg_map[myroot] = {}
4548                         vardb = self.trees[myroot]["vartree"].dbapi
4549                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4550                                 "--buildpkgonly" not in self.myopts
4551                         # This fakedbapi instance will model the state that the vdb will
4552                         # have after new packages have been installed.
4553                         fakedb = PackageVirtualDbapi(vardb.settings)
4554                         if preload_installed_pkgs:
4555                                 for pkg in vardb:
4556                                         self.spinner.update()
4557                                         # This triggers metadata updates via FakeVartree.
4558                                         vardb.aux_get(pkg.cpv, [])
4559                                         fakedb.cpv_inject(pkg)
4560
4561                         # Now that the vardb state is cached in our FakeVartree,
4562                         # we won't be needing the real vartree cache for a while.
4563                         # To make some room on the heap, clear the vardbapi
4564                         # caches.
4565                         trees[myroot]["vartree"].dbapi._clear_cache()
4566                         gc.collect()
4567
4568                         self.mydbapi[myroot] = fakedb
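			# graph_tree and filtered_tree below are trivial placeholder
			# objects whose only purpose is to expose a dbapi attribute in
			# the tree layout that dep_check() expects.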
4569                         def graph_tree():
4570                                 pass
4571                         graph_tree.dbapi = fakedb
4572                         self._graph_trees[myroot] = {}
4573                         self._filtered_trees[myroot] = {}
4574                         # Substitute the graph tree for the vartree in dep_check() since we
4575                         # want atom selections to be consistent with package selections
4576                         # that have already been made.
4577                         self._graph_trees[myroot]["porttree"]   = graph_tree
4578                         self._graph_trees[myroot]["vartree"]    = graph_tree
4579                         def filtered_tree():
4580                                 pass
4581                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4582                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4583
4584                         # Passing in graph_tree as the vartree here could lead to better
4585                         # atom selections in some cases by causing atoms for packages that
4586                         # have been added to the graph to be preferred over other choices.
4587                         # However, it can trigger atom selections that result in
4588                         # unresolvable direct circular dependencies. For example, this
4589                         # happens with gwydion-dylan which depends on either itself or
4590                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4591                         # gwydion-dylan-bin needs to be selected in order to avoid
4592                         # an unresolvable direct circular dependency.
4593                         #
4594                         # To solve the problem described above, pass in "graph_db" so that
4595                         # packages that have been added to the graph are distinguishable
4596                         # from other available packages and installed packages. Also, pass
4597                         # the parent package into self._select_atoms() calls so that
4598                         # unresolvable direct circular dependencies can be detected and
4599                         # avoided when possible.
4600                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4601                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4602
4603                         dbs = []
4604                         portdb = self.trees[myroot]["porttree"].dbapi
4605                         bindb  = self.trees[myroot]["bintree"].dbapi
4606                         vardb  = self.trees[myroot]["vartree"].dbapi
4607                         #               (db, pkg_type, built, installed, db_keys)
4608                         if "--usepkgonly" not in self.myopts:
4609                                 db_keys = list(portdb._aux_cache_keys)
4610                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4611                         if "--usepkg" in self.myopts:
4612                                 db_keys = list(bindb._aux_cache_keys)
4613                                 dbs.append((bindb,  "binary", True, False, db_keys))
4614                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4615                         dbs.append((vardb, "installed", True, True, db_keys))
4616                         self._filtered_trees[myroot]["dbs"] = dbs
4617                         if "--usepkg" in self.myopts:
4618                                 self.trees[myroot]["bintree"].populate(
4619                                         "--getbinpkg" in self.myopts,
4620                                         "--getbinpkgonly" in self.myopts)
4621                 del trees
4622
4623                 self.digraph=portage.digraph()
4624                 # contains all sets added to the graph
4625                 self._sets = {}
4626                 # contains atoms given as arguments
4627                 self._sets["args"] = InternalPackageSet()
4628                 # contains all atoms from all sets added to the graph, including
4629                 # atoms given as arguments
4630                 self._set_atoms = InternalPackageSet()
4631                 self._atom_arg_map = {}
4632                 # contains all nodes pulled in by self._set_atoms
4633                 self._set_nodes = set()
4634                 # Contains only Blocker -> Uninstall edges
4635                 self._blocker_uninstalls = digraph()
4636                 # Contains only Package -> Blocker edges
4637                 self._blocker_parents = digraph()
4638                 # Contains only irrelevant Package -> Blocker edges
4639                 self._irrelevant_blockers = digraph()
4640                 # Contains only unsolvable Package -> Blocker edges
4641                 self._unsolvable_blockers = digraph()
4642                 # Contains all Blocker -> Blocked Package edges
4643                 self._blocked_pkgs = digraph()
4644                 # Contains world packages that have been protected from
4645                 # uninstallation but may not have been added to the graph
4646                 # if the graph is not complete yet.
4647                 self._blocked_world_pkgs = {}
4648                 self._slot_collision_info = {}
4649                 # Slot collision nodes are not allowed to block other packages since
4650                 # blocker validation is only able to account for one package per slot.
4651                 self._slot_collision_nodes = set()
4652                 self._parent_atoms = {}
4653                 self._slot_conflict_parent_atoms = set()
4654                 self._serialized_tasks_cache = None
4655                 self._scheduler_graph = None
4656                 self._displayed_list = None
4657                 self._pprovided_args = []
4658                 self._missing_args = []
4659                 self._masked_installed = set()
4660                 self._unsatisfied_deps_for_display = []
4661                 self._unsatisfied_blockers_for_display = None
4662                 self._circular_deps_for_display = None
4663                 self._dep_stack = []
4664                 self._unsatisfied_deps = []
4665                 self._initially_unsatisfied_deps = []
4666                 self._ignored_deps = []
4667                 self._required_set_names = set(["system", "world"])
4668                 self._select_atoms = self._select_atoms_highest_available
4669                 self._select_package = self._select_pkg_highest_available
4670                 self._highest_pkg_cache = {}
4671
4672         def _show_slot_collision_notice(self):
4673                 """Show an informational message advising the user to mask one of
4674                 the packages. In some cases it may be possible to resolve this
4675                 automatically, but support for backtracking (removal of nodes that have
4676                 already been selected) will be required in order to handle all possible
4677                 cases.
4678                 """
4679
4680                 if not self._slot_collision_info:
4681                         return
4682
4683                 self._show_merge_list()
4684
4685                 msg = []
4686                 msg.append("\n!!! Multiple package instances within a single " + \
4687                         "package slot have been pulled\n")
4688                 msg.append("!!! into the dependency graph, resulting" + \
4689                         " in a slot conflict:\n\n")
4690                 indent = "  "
4691                 # Max number of parents shown, to avoid flooding the display.
4692                 max_parents = 3
4693                 explanation_columns = 70
4694                 explanations = 0
4695                 for (slot_atom, root), slot_nodes \
4696                         in self._slot_collision_info.iteritems():
4697                         msg.append(str(slot_atom))
4698                         msg.append("\n\n")
4699
4700                         for node in slot_nodes:
4701                                 msg.append(indent)
4702                                 msg.append(str(node))
4703                                 parent_atoms = self._parent_atoms.get(node)
4704                                 if parent_atoms:
4705                                         pruned_list = set()
4706                                         # Prefer conflict atoms over others.
4707                                         for parent_atom in parent_atoms:
4708                                                 if len(pruned_list) >= max_parents:
4709                                                         break
4710                                                 if parent_atom in self._slot_conflict_parent_atoms:
4711                                                         pruned_list.add(parent_atom)
4712
4713                                         # If this package was pulled in by conflict atoms then
4714                                         # show those alone since those are the most interesting.
4715                                         if not pruned_list:
4716                                                 # When generating the pruned list, prefer instances
4717                                                 # of DependencyArg over instances of Package.
4718                                                 for parent_atom in parent_atoms:
4719                                                         if len(pruned_list) >= max_parents:
4720                                                                 break
4721                                                         parent, atom = parent_atom
4722                                                         if isinstance(parent, DependencyArg):
4723                                                                 pruned_list.add(parent_atom)
4724                                                 # Prefer Package instances that themselves have been
4725                                                 # pulled into collision slots.
4726                                                 for parent_atom in parent_atoms:
4727                                                         if len(pruned_list) >= max_parents:
4728                                                                 break
4729                                                         parent, atom = parent_atom
4730                                                         if isinstance(parent, Package) and \
4731                                                                 (parent.slot_atom, parent.root) \
4732                                                                 in self._slot_collision_info:
4733                                                                 pruned_list.add(parent_atom)
4734                                                 for parent_atom in parent_atoms:
4735                                                         if len(pruned_list) >= max_parents:
4736                                                                 break
4737                                                         pruned_list.add(parent_atom)
4738                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4739                                         parent_atoms = pruned_list
4740                                         msg.append(" pulled in by\n")
4741                                         for parent_atom in parent_atoms:
4742                                                 parent, atom = parent_atom
4743                                                 msg.append(2*indent)
4744                                                 if isinstance(parent,
4745                                                         (PackageArg, AtomArg)):
4746                                                         # For PackageArg and AtomArg types, it's
4747                                                         # redundant to display the atom attribute.
4748                                                         msg.append(str(parent))
4749                                                 else:
4750                                                         # Display the specific atom from SetArg or
4751                                                         # Package types.
4752                                                         msg.append("%s required by %s" % (atom, parent))
4753                                                 msg.append("\n")
4754                                         if omitted_parents:
4755                                                 msg.append(2*indent)
4756                                                 msg.append("(and %d more)\n" % omitted_parents)
4757                                 else:
4758                                         msg.append(" (no parents)\n")
4759                                 msg.append("\n")
4760                         explanation = self._slot_conflict_explanation(slot_nodes)
4761                         if explanation:
4762                                 explanations += 1
4763                                 msg.append(indent + "Explanation:\n\n")
4764                                 for line in textwrap.wrap(explanation, explanation_columns):
4765                                         msg.append(2*indent + line + "\n")
4766                                 msg.append("\n")
4767                 msg.append("\n")
4768                 sys.stderr.write("".join(msg))
4769                 sys.stderr.flush()
4770
4771                 explanations_for_all = explanations == len(self._slot_collision_info)
4772
4773                 if explanations_for_all or "--quiet" in self.myopts:
4774                         return
4775
4776                 msg = []
4777                 msg.append("It may be possible to solve this problem ")
4778                 msg.append("by using package.mask to prevent one of ")
4779                 msg.append("those packages from being selected. ")
4780                 msg.append("However, it is also possible that conflicting ")
4781                 msg.append("dependencies exist such that they are impossible to ")
4782                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4783                 msg.append("the dependencies of two different packages, then those ")
4784                 msg.append("packages cannot be installed simultaneously.")
4785
4786                 from formatter import AbstractFormatter, DumbWriter
4787                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4788                 for x in msg:
4789                         f.add_flowing_data(x)
4790                 f.end_paragraph(1)
4791
4792                 msg = []
4793                 msg.append("For more information, see MASKED PACKAGES ")
4794                 msg.append("section in the emerge man page or refer ")
4795                 msg.append("to the Gentoo Handbook.")
4796                 for x in msg:
4797                         f.add_flowing_data(x)
4798                 f.end_paragraph(1)
4799                 f.writer.flush()
4800
4801         def _slot_conflict_explanation(self, slot_nodes):
4802                 """
4803                 When a slot conflict occurs due to USE deps, there are a few
4804                 different cases to consider:
4805
4806                 1) New USE are correctly set but --newuse wasn't requested so an
4807                    installed package with incorrect USE happened to get pulled
4808                    into graph before the new one.
4809                    into the graph before the new one.
4810                 2) New USE are incorrectly set but an installed package has correct
4811                    USE so it got pulled into the graph, and a new instance also got
4812                    pulled in due to --newuse or an upgrade.
4813
4814                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4815                    and multiple package instances got pulled into the same slot to
4816                    satisfy the conflicting deps.
4817
4818                 Currently, explanations and suggested courses of action are generated
4819                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4820                 """
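                 # Illustrative sketch only (hypothetical package and USE flags,
                 # not actual resolver state): suppose a parent depends on
                 # dev-libs/foo[ssl].
                 #   Case 1: USE="ssl" is now enabled in the config, but --newuse
                 #           was not requested, so the installed foo (built with
                 #           -ssl) was pulled into the graph alongside a new
                 #           instance that does satisfy the USE dep.
                 #   Case 2: USE="-ssl" is set, so the new instance cannot satisfy
                 #           foo[ssl]; the installed foo (built with ssl) does, and
                 #           both instances end up in the same slot.
                 # The checks below determine which node the conflicting USE-dep
                 # atoms match and return the corresponding explanation.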
4821
4822                 if len(slot_nodes) != 2:
4823                         # Suggestions are only implemented for
4824                         # conflicts between two packages.
4825                         return None
4826
4827                 all_conflict_atoms = self._slot_conflict_parent_atoms
4828                 matched_node = None
4829                 matched_atoms = None
4830                 unmatched_node = None
4831                 for node in slot_nodes:
4832                         parent_atoms = self._parent_atoms.get(node)
4833                         if not parent_atoms:
4834                                 # Normally, there are always parent atoms. If there are
4835                                 # none then something unexpected is happening and there's
4836                                 # currently no suggestion for this case.
4837                                 return None
4838                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4839                         for parent_atom in conflict_atoms:
4840                                 parent, atom = parent_atom
4841                                 if not atom.use:
4842                                         # Suggestions are currently only implemented for cases
4843                                         # in which all conflict atoms have USE deps.
4844                                         return None
4845                         if conflict_atoms:
4846                                 if matched_node is not None:
4847                                         # If conflict atoms match multiple nodes
4848                                         # then there's no suggestion.
4849                                         return None
4850                                 matched_node = node
4851                                 matched_atoms = conflict_atoms
4852                         else:
4853                                 if unmatched_node is not None:
4854                                         # Neither node is matched by conflict atoms, and
4855                                         # there is no suggestion for this case.
4856                                         return None
4857                                 unmatched_node = node
4858
4859                 if matched_node is None or unmatched_node is None:
4860                         # This shouldn't happen.
4861                         return None
4862
4863                 if unmatched_node.installed and not matched_node.installed:
4864                         return "New USE are correctly set, but --newuse wasn't" + \
4865                                 " requested, so an installed package with incorrect USE " + \
4866                                 "happened to get pulled into the dependency graph. " + \
4867                                 "In order to solve " + \
4868                                 "this, either specify the --newuse option or explicitly " + \
4869                                 "reinstall '%s'." % matched_node.slot_atom
4870
4871                 if matched_node.installed and not unmatched_node.installed:
4872                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4873                         explanation = ("New USE for '%s' are incorrectly set. " + \
4874                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4875                                 (matched_node.slot_atom, atoms[0])
4876                         if len(atoms) > 1:
4877                                 for atom in atoms[1:-1]:
4878                                         explanation += ", '%s'" % (atom,)
4879                                 if len(atoms) > 2:
4880                                         explanation += ","
4881                                 explanation += " and '%s'" % (atoms[-1],)
4882                         explanation += "."
4883                         return explanation
4884
4885                 return None
4886
4887         def _process_slot_conflicts(self):
4888                 """
4889                 Process slot conflict data to identify specific atoms which
4890                 lead to conflict. These atoms only match a subset of the
4891                 packages that have been pulled into a given slot.
4892                 """
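                 # Sketch of the data involved (hypothetical values): for a slot
                 # key like ("dev-libs/foo:0", "/"), slot_nodes might hold
                 # foo-1.0 and foo-1.1. A parent atom such as
                 # (bar-2.0, ">=dev-libs/foo-1.1") fails to match foo-1.0 and is
                 # therefore recorded in _slot_conflict_parent_atoms, while an
                 # atom like "dev-libs/foo" matches both instances and is simply
                 # propagated into each node's parent-atom set.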
4893                 for (slot_atom, root), slot_nodes \
4894                         in self._slot_collision_info.iteritems():
4895
4896                         all_parent_atoms = set()
4897                         for pkg in slot_nodes:
4898                                 parent_atoms = self._parent_atoms.get(pkg)
4899                                 if not parent_atoms:
4900                                         continue
4901                                 all_parent_atoms.update(parent_atoms)
4902
4903                         for pkg in slot_nodes:
4904                                 parent_atoms = self._parent_atoms.get(pkg)
4905                                 if parent_atoms is None:
4906                                         parent_atoms = set()
4907                                         self._parent_atoms[pkg] = parent_atoms
4908                                 for parent_atom in all_parent_atoms:
4909                                         if parent_atom in parent_atoms:
4910                                                 continue
4911                                         # Use package set for matching since it will match via
4912                                         # PROVIDE when necessary, while match_from_list does not.
4913                                         parent, atom = parent_atom
4914                                         atom_set = InternalPackageSet(
4915                                                 initial_atoms=(atom,))
4916                                         if atom_set.findAtomForPackage(pkg):
4917                                                 parent_atoms.add(parent_atom)
4918                                         else:
4919                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4920
4921         def _reinstall_for_flags(self, forced_flags,
4922                 orig_use, orig_iuse, cur_use, cur_iuse):
4923                 """Return a set of flags that trigger reinstallation, or None if there
4924                 are no such flags."""
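                 # Worked example (hypothetical flag sets): with --newuse, given
                 #   orig_iuse = set(["ssl", "ipv6"]), orig_use = set(["ssl"])
                 #   cur_iuse  = set(["ssl", "ipv6"]), cur_use  = set(["ssl", "ipv6"])
                 # and no forced flags, the symmetric difference of the enabled
                 # flags is set(["ipv6"]), so that flag is returned and the
                 # package is scheduled for reinstallation.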
4925                 if "--newuse" in self.myopts:
4926                         flags = set(orig_iuse.symmetric_difference(
4927                                 cur_iuse).difference(forced_flags))
4928                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4929                                 cur_iuse.intersection(cur_use)))
4930                         if flags:
4931                                 return flags
4932                 elif "changed-use" == self.myopts.get("--reinstall"):
4933                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4934                                 cur_iuse.intersection(cur_use))
4935                         if flags:
4936                                 return flags
4937                 return None
4938
4939         def _create_graph(self, allow_unsatisfied=False):
4940                 dep_stack = self._dep_stack
4941                 while dep_stack:
4942                         self.spinner.update()
4943                         dep = dep_stack.pop()
4944                         if isinstance(dep, Package):
4945                                 if not self._add_pkg_deps(dep,
4946                                         allow_unsatisfied=allow_unsatisfied):
4947                                         return 0
4948                                 continue
4949                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4950                                 return 0
4951                 return 1
4952
4953         def _add_dep(self, dep, allow_unsatisfied=False):
4954                 debug = "--debug" in self.myopts
4955                 buildpkgonly = "--buildpkgonly" in self.myopts
4956                 nodeps = "--nodeps" in self.myopts
4957                 empty = "empty" in self.myparams
4958                 deep = "deep" in self.myparams
4959                 update = "--update" in self.myopts and dep.depth <= 1
4960                 if dep.blocker:
4961                         if not buildpkgonly and \
4962                                 not nodeps and \
4963                                 dep.parent not in self._slot_collision_nodes:
4964                                 if dep.parent.onlydeps:
4965                                         # It's safe to ignore blockers if the
4966                                         # parent is an --onlydeps node.
4967                                         return 1
4968                                 # The blocker applies to the root where
4969                                 # the parent is or will be installed.
4970                                 blocker = Blocker(atom=dep.atom,
4971                                         eapi=dep.parent.metadata["EAPI"],
4972                                         root=dep.parent.root)
4973                                 self._blocker_parents.add(blocker, dep.parent)
4974                         return 1
4975                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4976                         onlydeps=dep.onlydeps)
4977                 if not dep_pkg:
4978                         if dep.priority.optional:
4979                                 # This could be an unnecessary build-time dep
4980                                 # pulled in by --with-bdeps=y.
4981                                 return 1
4982                         if allow_unsatisfied:
4983                                 self._unsatisfied_deps.append(dep)
4984                                 return 1
4985                         self._unsatisfied_deps_for_display.append(
4986                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4987                         return 0
4988                 # In some cases, dep_check will return deps that shouldn't
4989                 # be processed any further, so they are identified and
4990                 # discarded here. Try to discard as few as possible since
4991                 # discarded dependencies reduce the amount of information
4992                 # available for optimization of merge order.
4993                 if dep.priority.satisfied and \
4994                         not dep_pkg.installed and \
4995                         not (existing_node or empty or deep or update):
4996                         myarg = None
4997                         if dep.root == self.target_root:
4998                                 try:
4999                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5000                                 except StopIteration:
5001                                         pass
5002                                 except portage.exception.InvalidDependString:
5003                                         if not dep_pkg.installed:
5004                                                 # This shouldn't happen since the package
5005                                                 # should have been masked.
5006                                                 raise
5007                         if not myarg:
5008                                 self._ignored_deps.append(dep)
5009                                 return 1
5010
5011                 if not self._add_pkg(dep_pkg, dep):
5012                         return 0
5013                 return 1
5014
5015         def _add_pkg(self, pkg, dep):
5016                 myparent = None
5017                 priority = None
5018                 depth = 0
5019                 if dep is None:
5020                         dep = Dependency()
5021                 else:
5022                         myparent = dep.parent
5023                         priority = dep.priority
5024                         depth = dep.depth
5025                 if priority is None:
5026                         priority = DepPriority()
5027                 """
5028                 Fills the digraph with nodes comprised of packages to merge.
5029                 pkg is the package to merge.
5030                 dep.parent is the package depending on pkg (or None).
5031                 With --onlydeps, the package itself is not merged and is only
5032                 examined for its dependencies.
5033                 #stuff to add:
5034                 #SLOT-aware emerge
5035                 #IUSE-aware emerge -> USE DEP aware depgraph
5036                 #"no downgrade" emerge
5037                 """
5038                 # Ensure that the dependencies of the same package
5039                 # are never processed more than once.
5040                 previously_added = pkg in self.digraph
5041
5042                 # select the correct /var database that we'll be checking against
5043                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5044                 pkgsettings = self.pkgsettings[pkg.root]
5045
5046                 arg_atoms = None
5047                 if True:
5048                         try:
5049                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5050                         except portage.exception.InvalidDependString, e:
5051                                 if not pkg.installed:
5052                                         show_invalid_depstring_notice(
5053                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5054                                         return 0
5055                                 del e
5056
5057                 if not pkg.onlydeps:
5058                         if not pkg.installed and \
5059                                 "empty" not in self.myparams and \
5060                                 vardbapi.match(pkg.slot_atom):
5061                                 # Increase the priority of dependencies on packages that
5062                                 # are being rebuilt. This optimizes merge order so that
5063                                 # dependencies are rebuilt/updated as soon as possible,
5064                                 # which is needed especially when emerge is called by
5065                                 # revdep-rebuild since dependencies may be affected by ABI
5066                                 # breakage that has rendered them useless. Don't adjust
5067                                 # priority here when in "empty" mode since all packages
5068                                 # are being merged in that case.
5069                                 priority.rebuild = True
5070
5071                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5072                         slot_collision = False
5073                         if existing_node:
5074                                 existing_node_matches = pkg.cpv == existing_node.cpv
5075                                 if existing_node_matches and \
5076                                         pkg != existing_node and \
5077                                         dep.atom is not None:
5078                                         # Use package set for matching since it will match via
5079                                         # PROVIDE when necessary, while match_from_list does not.
5080                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5081                                         if not atom_set.findAtomForPackage(existing_node):
5082                                                 existing_node_matches = False
5083                                 if existing_node_matches:
5084                                         # The existing node can be reused.
5085                                         if arg_atoms:
5086                                                 for parent_atom in arg_atoms:
5087                                                         parent, atom = parent_atom
5088                                                         self.digraph.add(existing_node, parent,
5089                                                                 priority=priority)
5090                                                         self._add_parent_atom(existing_node, parent_atom)
5091                                         # If a direct circular dependency is not an unsatisfied
5092                                         # buildtime dependency then drop it here since otherwise
5093                                         # it can skew the merge order calculation in an unwanted
5094                                         # way.
5095                                         if existing_node != myparent or \
5096                                                 (priority.buildtime and not priority.satisfied):
5097                                                 self.digraph.addnode(existing_node, myparent,
5098                                                         priority=priority)
5099                                                 if dep.atom is not None and dep.parent is not None:
5100                                                         self._add_parent_atom(existing_node,
5101                                                                 (dep.parent, dep.atom))
5102                                         return 1
5103                                 else:
5104
5105                                         # A slot collision has occurred.  Sometimes this coincides
5106                                         # with unresolvable blockers, so the slot collision will be
5107                                         # shown later if there are no unresolvable blockers.
5108                                         self._add_slot_conflict(pkg)
5109                                         slot_collision = True
5110
5111                         if slot_collision:
5112                                 # Now add this node to the graph so that self.display()
5113                 # can show use flags and --tree output.  This node is
5114                                 # only being partially added to the graph.  It must not be
5115                                 # allowed to interfere with the other nodes that have been
5116                                 # added.  Do not overwrite data for existing nodes in
5117                                 # self.mydbapi since that data will be used for blocker
5118                                 # validation.
5119                                 # Even though the graph is now invalid, continue to process
5120                                 # dependencies so that things like --fetchonly can still
5121                                 # function despite collisions.
5122                                 pass
5123                         elif not previously_added:
5124                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5125                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5126                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5127
5128                         if not pkg.installed:
5129                                 # Allow this package to satisfy old-style virtuals in case it
5130                                 # doesn't already. Any pre-existing providers will be preferred
5131                                 # over this one.
5132                                 try:
5133                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5134                                         # For consistency, also update the global virtuals.
5135                                         settings = self.roots[pkg.root].settings
5136                                         settings.unlock()
5137                                         settings.setinst(pkg.cpv, pkg.metadata)
5138                                         settings.lock()
5139                                 except portage.exception.InvalidDependString, e:
5140                                         show_invalid_depstring_notice(
5141                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5142                                         del e
5143                                         return 0
5144
5145                 if arg_atoms:
5146                         self._set_nodes.add(pkg)
5147
5148                 # Do this even for --onlydeps packages (not scheduled for merge) so that the
5149                 # parent/child relationship is always known in case
5150                 # self._show_slot_collision_notice() needs to be called later.
5151                 self.digraph.add(pkg, myparent, priority=priority)
5152                 if dep.atom is not None and dep.parent is not None:
5153                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5154
5155                 if arg_atoms:
5156                         for parent_atom in arg_atoms:
5157                                 parent, atom = parent_atom
5158                                 self.digraph.add(pkg, parent, priority=priority)
5159                                 self._add_parent_atom(pkg, parent_atom)
5160
5161                 """ This section determines whether we go deeper into dependencies or not.
5162                     We want to go deeper on a few occasions:
5163                     Installing package A, we need to make sure package A's deps are met.
5164                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5165                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5166                 """
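                 # For example (hypothetical parameters): with
                 # myparams == set(["recurse"]), the deps of a package being
                 # merged are pushed onto _dep_stack, while the deps of an
                 # already-installed package are routed to _ignored_deps unless
                 # "deep" is also in myparams.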
5167                 dep_stack = self._dep_stack
5168                 if "recurse" not in self.myparams:
5169                         return 1
5170                 elif pkg.installed and \
5171                         "deep" not in self.myparams:
5172                         dep_stack = self._ignored_deps
5173
5174                 self.spinner.update()
5175
5176                 if arg_atoms:
5177                         depth = 0
5178                 pkg.depth = depth
5179                 if not previously_added:
5180                         dep_stack.append(pkg)
5181                 return 1
5182
5183         def _add_parent_atom(self, pkg, parent_atom):
5184                 parent_atoms = self._parent_atoms.get(pkg)
5185                 if parent_atoms is None:
5186                         parent_atoms = set()
5187                         self._parent_atoms[pkg] = parent_atoms
5188                 parent_atoms.add(parent_atom)
5189
5190         def _add_slot_conflict(self, pkg):
5191                 self._slot_collision_nodes.add(pkg)
5192                 slot_key = (pkg.slot_atom, pkg.root)
5193                 slot_nodes = self._slot_collision_info.get(slot_key)
5194                 if slot_nodes is None:
5195                         slot_nodes = set()
5196                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5197                         self._slot_collision_info[slot_key] = slot_nodes
5198                 slot_nodes.add(pkg)
5199
5200         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5201
5202                 mytype = pkg.type_name
5203                 myroot = pkg.root
5204                 mykey = pkg.cpv
5205                 metadata = pkg.metadata
5206                 myuse = pkg.use.enabled
5207                 jbigkey = pkg
5208                 depth = pkg.depth + 1
5209                 removal_action = "remove" in self.myparams
5210
5211                 edepend={}
5212                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5213                 for k in depkeys:
5214                         edepend[k] = metadata[k]
5215
5216                 if not pkg.built and \
5217                         "--buildpkgonly" in self.myopts and \
5218                         "deep" not in self.myparams and \
5219                         "empty" not in self.myparams:
5220                         edepend["RDEPEND"] = ""
5221                         edepend["PDEPEND"] = ""
5222                 bdeps_optional = False
5223
5224                 if pkg.built and not removal_action:
5225                         if self.myopts.get("--with-bdeps", "n") == "y":
5226                                 # Pull in build time deps as requested, but mark them as
5227                                 # "optional" since they are not strictly required. This allows
5228                                 # more freedom in the merge order calculation for solving
5229                                 # circular dependencies. Don't convert to PDEPEND since that
5230                                 # could make --with-bdeps=y less effective if it is used to
5231                                 # adjust merge order to prevent built_with_use() calls from
5232                                 # failing.
5233                                 bdeps_optional = True
5234                         else:
5235                                 # built packages do not have build time dependencies.
5236                                 edepend["DEPEND"] = ""
5237
5238                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5239                         edepend["DEPEND"] = ""
5240
5241                 deps = (
5242                         ("/", edepend["DEPEND"],
5243                                 self._priority(buildtime=(not bdeps_optional),
5244                                 optional=bdeps_optional)),
5245                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5246                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5247                 )
5248
5249                 debug = "--debug" in self.myopts
5250                 strict = mytype != "installed"
5251                 try:
5252                         for dep_root, dep_string, dep_priority in deps:
5253                                 if not dep_string:
5254                                         continue
5255                                 if debug:
5256                                         print
5257                                         print "Parent:   ", jbigkey
5258                                         print "Depstring:", dep_string
5259                                         print "Priority:", dep_priority
5260                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5261                                 try:
5262                                         selected_atoms = self._select_atoms(dep_root,
5263                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5264                                                 priority=dep_priority)
5265                                 except portage.exception.InvalidDependString, e:
5266                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5267                                         return 0
5268                                 if debug:
5269                                         print "Candidates:", selected_atoms
5270
5271                                 for atom in selected_atoms:
5272                                         try:
5273
5274                                                 atom = portage.dep.Atom(atom)
5275
5276                                                 mypriority = dep_priority.copy()
5277                                                 if not atom.blocker and vardb.match(atom):
5278                                                         mypriority.satisfied = True
5279
5280                                                 if not self._add_dep(Dependency(atom=atom,
5281                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5282                                                         priority=mypriority, root=dep_root),
5283                                                         allow_unsatisfied=allow_unsatisfied):
5284                                                         return 0
5285
5286                                         except portage.exception.InvalidAtom, e:
5287                                                 show_invalid_depstring_notice(
5288                                                         pkg, dep_string, str(e))
5289                                                 del e
5290                                                 if not pkg.installed:
5291                                                         return 0
5292
5293                                 if debug:
5294                                         print "Exiting...", jbigkey
5295                 except portage.exception.AmbiguousPackageName, e:
5296                         pkgs = e.args[0]
5297                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5298                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5299                         for cpv in pkgs:
5300                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5301                         portage.writemsg("\n", noiselevel=-1)
5302                         if mytype == "binary":
5303                                 portage.writemsg(
5304                                         "!!! This binary package cannot be installed: '%s'\n" % \
5305                                         mykey, noiselevel=-1)
5306                         elif mytype == "ebuild":
5307                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5308                                 myebuild, mylocation = portdb.findname2(mykey)
5309                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5310                                         "'%s'\n" % myebuild, noiselevel=-1)
5311                         portage.writemsg("!!! Please notify the package maintainer " + \
5312                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5313                         return 0
5314                 return 1
5315
5316         def _priority(self, **kwargs):
5317                 if "remove" in self.myparams:
5318                         priority_constructor = UnmergeDepPriority
5319                 else:
5320                         priority_constructor = DepPriority
5321                 return priority_constructor(**kwargs)
5322
5323         def _dep_expand(self, root_config, atom_without_category):
5324                 """
5325                 @param root_config: a root config instance
5326                 @type root_config: RootConfig
5327                 @param atom_without_category: an atom without a category component
5328                 @type atom_without_category: String
5329                 @rtype: list
5330                 @returns: a list of atoms containing categories (possibly empty)
5331                 """
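                 # Usage sketch (hypothetical result): _dep_expand(root_config,
                 # "portage") probes every configured db for */portage and might
                 # return ["sys-apps/portage"], or several atoms when the short
                 # name exists in more than one category.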
5332                 null_cp = portage.dep_getkey(insert_category_into_atom(
5333                         atom_without_category, "null"))
5334                 cat, atom_pn = portage.catsplit(null_cp)
5335
5336                 dbs = self._filtered_trees[root_config.root]["dbs"]
5337                 categories = set()
5338                 for db, pkg_type, built, installed, db_keys in dbs:
5339                         for cat in db.categories:
5340                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5341                                         categories.add(cat)
5342
5343                 deps = []
5344                 for cat in categories:
5345                         deps.append(insert_category_into_atom(
5346                                 atom_without_category, cat))
5347                 return deps
5348
5349         def _have_new_virt(self, root, atom_cp):
5350                 ret = False
5351                 for db, pkg_type, built, installed, db_keys in \
5352                         self._filtered_trees[root]["dbs"]:
5353                         if db.cp_list(atom_cp):
5354                                 ret = True
5355                                 break
5356                 return ret
5357
5358         def _iter_atoms_for_pkg(self, pkg):
5359                 # TODO: add multiple $ROOT support
5360                 if pkg.root != self.target_root:
5361                         return
5362                 atom_arg_map = self._atom_arg_map
5363                 root_config = self.roots[pkg.root]
5364                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5365                         atom_cp = portage.dep_getkey(atom)
5366                         if atom_cp != pkg.cp and \
5367                                 self._have_new_virt(pkg.root, atom_cp):
5368                                 continue
5369                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5370                         visible_pkgs.reverse() # descending order
5371                         higher_slot = None
5372                         for visible_pkg in visible_pkgs:
5373                                 if visible_pkg.cp != atom_cp:
5374                                         continue
5375                                 if pkg >= visible_pkg:
5376                                         # This is descending order, and we're not
5377                                         # interested in any versions <= pkg given.
5378                                         break
5379                                 if pkg.slot_atom != visible_pkg.slot_atom:
5380                                         higher_slot = visible_pkg
5381                                         break
5382                         if higher_slot is not None:
5383                                 continue
5384                         for arg in atom_arg_map[(atom, pkg.root)]:
5385                                 if isinstance(arg, PackageArg) and \
5386                                         arg.package != pkg:
5387                                         continue
5388                                 yield arg, atom
5389
5390         def select_files(self, myfiles):
5391                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5392                 appropriate depgraph and return a favorite list."""
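                 # Typical inputs (illustrative only): myfiles may mix argument
                 # types, e.g. ["foo-1.0.tbz2", "world", "dev-lang/python",
                 # "/usr/bin/python"]. Binary packages and ebuilds become
                 # PackageArg entries, set names become SetArg entries, plain
                 # atoms become AtomArg entries, and absolute paths are mapped
                 # to their owning packages via vardb ownership lookups.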
5393                 debug = "--debug" in self.myopts
5394                 root_config = self.roots[self.target_root]
5395                 sets = root_config.sets
5396                 getSetAtoms = root_config.setconfig.getSetAtoms
5397                 myfavorites=[]
5398                 myroot = self.target_root
5399                 dbs = self._filtered_trees[myroot]["dbs"]
5400                 vardb = self.trees[myroot]["vartree"].dbapi
5401                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5402                 portdb = self.trees[myroot]["porttree"].dbapi
5403                 bindb = self.trees[myroot]["bintree"].dbapi
5404                 pkgsettings = self.pkgsettings[myroot]
5405                 args = []
5406                 onlydeps = "--onlydeps" in self.myopts
5407                 lookup_owners = []
5408                 for x in myfiles:
5409                         ext = os.path.splitext(x)[1]
5410                         if ext==".tbz2":
5411                                 if not os.path.exists(x):
5412                                         if os.path.exists(
5413                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5414                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5415                                         elif os.path.exists(
5416                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5417                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5418                                         else:
5419                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5420                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5421                                                 return 0, myfavorites
5422                                 mytbz2=portage.xpak.tbz2(x)
5423                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5424                                 if os.path.realpath(x) != \
5425                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5426                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5427                                         return 0, myfavorites
5428                                 db_keys = list(bindb._aux_cache_keys)
5429                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5430                                 pkg = Package(type_name="binary", root_config=root_config,
5431                                         cpv=mykey, built=True, metadata=metadata,
5432                                         onlydeps=onlydeps)
5433                                 self._pkg_cache[pkg] = pkg
5434                                 args.append(PackageArg(arg=x, package=pkg,
5435                                         root_config=root_config))
5436                         elif ext==".ebuild":
5437                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5438                                 pkgdir = os.path.dirname(ebuild_path)
5439                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5440                                 cp = pkgdir[len(tree_root)+1:]
5441                                 e = portage.exception.PackageNotFound(
5442                                         ("%s is not in a valid portage tree " + \
5443                                         "hierarchy or does not exist") % x)
5444                                 if not portage.isvalidatom(cp):
5445                                         raise e
5446                                 cat = portage.catsplit(cp)[0]
5447                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5448                                 if not portage.isvalidatom("="+mykey):
5449                                         raise e
5450                                 ebuild_path = portdb.findname(mykey)
5451                                 if ebuild_path:
5452                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5453                                                 cp, os.path.basename(ebuild_path)):
5454                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5455                                                 return 0, myfavorites
5456                                         if mykey not in portdb.xmatch(
5457                                                 "match-visible", portage.dep_getkey(mykey)):
5458                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5459                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5460                                                 print colorize("BAD", "*** page for details.")
5461                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5462                                                         "Continuing...")
5463                                 else:
5464                                         raise portage.exception.PackageNotFound(
5465                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5466                                 db_keys = list(portdb._aux_cache_keys)
5467                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5468                                 pkg = Package(type_name="ebuild", root_config=root_config,
5469                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5470                                 pkgsettings.setcpv(pkg)
5471                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5472                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5473                                 self._pkg_cache[pkg] = pkg
5474                                 args.append(PackageArg(arg=x, package=pkg,
5475                                         root_config=root_config))
5476                         elif x.startswith(os.path.sep):
5477                                 if not x.startswith(myroot):
5478                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5479                                                 " $ROOT.\n") % x, noiselevel=-1)
5480                                         return 0, []
5481                                 # Queue these up since it's most efficient to handle
5482                                 # multiple files in a single iter_owners() call.
5483                                 lookup_owners.append(x)
5484                         else:
5485                                 if x in ("system", "world"):
5486                                         x = SETPREFIX + x
5487                                 if x.startswith(SETPREFIX):
5488                                         s = x[len(SETPREFIX):]
5489                                         if s not in sets:
5490                                                 raise portage.exception.PackageSetNotFound(s)
5491                                         if s in self._sets:
5492                                                 continue
5493                                         # Recursively expand sets so that containment tests in
5494                                         # self._get_parent_sets() properly match atoms in nested
5495                                         # sets (like if world contains system).
5496                                         expanded_set = InternalPackageSet(
5497                                                 initial_atoms=getSetAtoms(s))
5498                                         self._sets[s] = expanded_set
5499                                         args.append(SetArg(arg=x, set=expanded_set,
5500                                                 root_config=root_config))
5501                                         continue
5502                                 if not is_valid_package_atom(x):
5503                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5504                                                 noiselevel=-1)
5505                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5506                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5507                                         return (0,[])
5508                                 # Don't expand categories or old-style virtuals here unless
5509                                 # necessary. Expansion of old-style virtuals here causes at
5510                                 # least the following problems:
5511                                 #   1) It's more difficult to determine which set(s) an atom
5512                                 #      came from, if any.
5513                                 #   2) It takes away freedom from the resolver to choose other
5514                                 #      possible expansions when necessary.
5515                                 if "/" in x:
5516                                         args.append(AtomArg(arg=x, atom=x,
5517                                                 root_config=root_config))
5518                                         continue
5519                                 expanded_atoms = self._dep_expand(root_config, x)
5520                                 installed_cp_set = set()
5521                                 for atom in expanded_atoms:
5522                                         atom_cp = portage.dep_getkey(atom)
5523                                         if vardb.cp_list(atom_cp):
5524                                                 installed_cp_set.add(atom_cp)
5525                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5526                                         installed_cp = iter(installed_cp_set).next()
5527                                         expanded_atoms = [atom for atom in expanded_atoms \
5528                                                 if portage.dep_getkey(atom) == installed_cp]
5529
5530                                 if len(expanded_atoms) > 1:
5531                                         print
5532                                         print
5533                                         ambiguous_package_name(x, expanded_atoms, root_config,
5534                                                 self.spinner, self.myopts)
5535                                         return False, myfavorites
5536                                 if expanded_atoms:
5537                                         atom = expanded_atoms[0]
5538                                 else:
5539                                         null_atom = insert_category_into_atom(x, "null")
5540                                         null_cp = portage.dep_getkey(null_atom)
5541                                         cat, atom_pn = portage.catsplit(null_cp)
5542                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5543                                         if virts_p:
5544                                                 # Allow the depgraph to choose which virtual.
5545                                                 atom = insert_category_into_atom(x, "virtual")
5546                                         else:
5547                                                 atom = insert_category_into_atom(x, "null")
5548
5549                                 args.append(AtomArg(arg=x, atom=atom,
5550                                         root_config=root_config))
5551
5552                 if lookup_owners:
5553                         relative_paths = []
5554                         search_for_multiple = False
5555                         if len(lookup_owners) > 1:
5556                                 search_for_multiple = True
5557
5558                         for x in lookup_owners:
5559                                 if not search_for_multiple and os.path.isdir(x):
5560                                         search_for_multiple = True
5561                                 relative_paths.append(x[len(myroot):])
5562
5563                         owners = set()
5564                         for pkg, relative_path in \
5565                                 real_vardb._owners.iter_owners(relative_paths):
5566                                 owners.add(pkg.mycpv)
5567                                 if not search_for_multiple:
5568                                         break
5569
5570                         if not owners:
5571                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5572                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5573                                 return 0, []
5574
5575                         for cpv in owners:
5576                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5577                                 if not slot:
5578                                         # portage now masks packages with missing slot, but it's
5579                                         # possible that one was installed by an older version
5580                                         atom = portage.cpv_getkey(cpv)
5581                                 else:
5582                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5583                                 args.append(AtomArg(arg=atom, atom=atom,
5584                                         root_config=root_config))
5585
5586                 if "--update" in self.myopts:
5587                         # In some cases, the greedy slots behavior can pull in a slot that
5588                         # the user would want to uninstall due to it being blocked by a
5589                         # newer version in a different slot. Therefore, it's necessary to
5590                         # detect and discard any that should be uninstalled. Each time
5591                         # that arguments are updated, package selections are repeated in
5592                         # order to ensure consistency with the current arguments:
5593                         #
5594                         #  1) Initialize args
5595                         #  2) Select packages and generate initial greedy atoms
5596                         #  3) Update args with greedy atoms
5597                         #  4) Select packages and generate greedy atoms again, while
5598                         #     accounting for any blockers between selected packages
5599                         #  5) Update args with revised greedy atoms
5600
5601                         self._set_args(args)
5602                         greedy_args = []
5603                         for arg in args:
5604                                 greedy_args.append(arg)
5605                                 if not isinstance(arg, AtomArg):
5606                                         continue
5607                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5608                                         greedy_args.append(
5609                                                 AtomArg(arg=arg.arg, atom=atom,
5610                                                         root_config=arg.root_config))
5611
5612                         self._set_args(greedy_args)
5613                         del greedy_args
5614
5615                         # Revise greedy atoms, accounting for any blockers
5616                         # between selected packages.
5617                         revised_greedy_args = []
5618                         for arg in args:
5619                                 revised_greedy_args.append(arg)
5620                                 if not isinstance(arg, AtomArg):
5621                                         continue
5622                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5623                                         blocker_lookahead=True):
5624                                         revised_greedy_args.append(
5625                                                 AtomArg(arg=arg.arg, atom=atom,
5626                                                         root_config=arg.root_config))
5627                         args = revised_greedy_args
5628                         del revised_greedy_args
5629
5630                 self._set_args(args)
5631
5632                 myfavorites = set(myfavorites)
5633                 for arg in args:
5634                         if isinstance(arg, (AtomArg, PackageArg)):
5635                                 myfavorites.add(arg.atom)
5636                         elif isinstance(arg, SetArg):
5637                                 myfavorites.add(arg.arg)
5638                 myfavorites = list(myfavorites)
5639
5640                 pprovideddict = pkgsettings.pprovideddict
5641                 if debug:
5642                         portage.writemsg("\n", noiselevel=-1)
5643                 # Order needs to be preserved since a feature of --nodeps
5644                 # is to allow the user to force a specific merge order.
5645                 args.reverse()
5646                 while args:
5647                         arg = args.pop()
5648                         for atom in arg.set:
5649                                 self.spinner.update()
5650                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5651                                         root=myroot, parent=arg)
5652                                 atom_cp = portage.dep_getkey(atom)
5653                                 try:
5654                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5655                                         if pprovided and portage.match_from_list(atom, pprovided):
5656                                                 # A provided package has been specified on the command line.
5657                                                 self._pprovided_args.append((arg, atom))
5658                                                 continue
5659                                         if isinstance(arg, PackageArg):
5660                                                 if not self._add_pkg(arg.package, dep) or \
5661                                                         not self._create_graph():
5662                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5663                                                                 "dependencies for %s\n") % arg.arg)
5664                                                         return 0, myfavorites
5665                                                 continue
5666                                         if debug:
5667                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5668                                                         (arg, atom), noiselevel=-1)
5669                                         pkg, existing_node = self._select_package(
5670                                                 myroot, atom, onlydeps=onlydeps)
5671                                         if not pkg:
5672                                                 if not (isinstance(arg, SetArg) and \
5673                                                         arg.name in ("system", "world")):
5674                                                         self._unsatisfied_deps_for_display.append(
5675                                                                 ((myroot, atom), {}))
5676                                                         return 0, myfavorites
5677                                                 self._missing_args.append((arg, atom))
5678                                                 continue
5679                                         if atom_cp != pkg.cp:
5680                                                 # For old-style virtuals, we need to repeat the
5681                                                 # package.provided check against the selected package.
5682                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5683                                                 pprovided = pprovideddict.get(pkg.cp)
5684                                                 if pprovided and \
5685                                                         portage.match_from_list(expanded_atom, pprovided):
5686                                                         # A provided package has been
5687                                                         # specified on the command line.
5688                                                         self._pprovided_args.append((arg, atom))
5689                                                         continue
5690                                         if pkg.installed and "selective" not in self.myparams:
5691                                                 self._unsatisfied_deps_for_display.append(
5692                                                         ((myroot, atom), {}))
5693                                                 # Previous behavior was to bail out in this case, but
5694                                                 # since the dep is satisfied by the installed package,
5695                                                 # it's more friendly to continue building the graph
5696                                                 # and just show a warning message. Therefore, only bail
5697                                                 # out here if the atom is not from either the system or
5698                                                 # world set.
5699                                                 if not (isinstance(arg, SetArg) and \
5700                                                         arg.name in ("system", "world")):
5701                                                         return 0, myfavorites
5702
5703                                         # Add the selected package to the graph as soon as possible
5704                                         # so that later dep_check() calls can use it as feedback
5705                                         # for making more consistent atom selections.
5706                                         if not self._add_pkg(pkg, dep):
5707                                                 if isinstance(arg, SetArg):
5708                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5709                                                                 "dependencies for %s from %s\n") % \
5710                                                                 (atom, arg.arg))
5711                                                 else:
5712                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5713                                                                 "dependencies for %s\n") % atom)
5714                                                 return 0, myfavorites
5715
5716                                 except portage.exception.MissingSignature, e:
5717                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5718                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5719                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5720                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5721                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5722                                         return 0, myfavorites
5723                                 except portage.exception.InvalidSignature, e:
5724                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5725                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5726                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5727                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5728                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5729                                         return 0, myfavorites
5730                                 except SystemExit, e:
5731                                         raise # Re-raise so that emerge can still exit.
5732                                 except Exception, e:
5733                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5734                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5735                                         raise
5736
5737                 # Now that the root packages have been added to the graph,
5738                 # process the dependencies.
5739                 if not self._create_graph():
5740                         return 0, myfavorites
5741
5742                 missing = 0
5743                 if "--usepkgonly" in self.myopts:
5744                         for xs in self.digraph.all_nodes():
5745                                 if not isinstance(xs, Package):
5746                                         continue
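                                     # A Package indexes like its hash key, ordered here as
                                     # (type_name, root, cpv, operation): xs[0] is the package
                                     # type, xs[2] the cpv, and xs[3] the requested operation.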
5747                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5748                                         if missing == 0:
5749                                                 print
5750                                         missing += 1
5751                                         print "Missing binary for:",xs[2]
5752
5753                 try:
5754                         self.altlist()
5755                 except self._unknown_internal_error:
5756                         return False, myfavorites
5757
5758                 # The return value is true unless binaries are missing.
5759                 return (not missing, myfavorites)
5760
5761         def _set_args(self, args):
5762                 """
5763                 Create the "args" package set from atoms and packages given as
5764                 arguments. This method can be called multiple times if necessary.
5765                 The package selection cache is automatically invalidated, since
5766                 arguments influence package selections.
5767                 """
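                     # After this runs, the "args" set holds the atoms of the AtomArg and
                     # PackageArg arguments, and self._atom_arg_map maps (atom, root) keys
                     # to the list of argument objects that contributed each atom, e.g. a
                     # key like ('>=dev-lang/python-2.5', '/') (illustrative values only).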
5768                 args_set = self._sets["args"]
5769                 args_set.clear()
5770                 for arg in args:
5771                         if not isinstance(arg, (AtomArg, PackageArg)):
5772                                 continue
5773                         atom = arg.atom
5774                         if atom in args_set:
5775                                 continue
5776                         args_set.add(atom)
5777
5778                 self._set_atoms.clear()
5779                 self._set_atoms.update(chain(*self._sets.itervalues()))
5780                 atom_arg_map = self._atom_arg_map
5781                 atom_arg_map.clear()
5782                 for arg in args:
5783                         for atom in arg.set:
5784                                 atom_key = (atom, arg.root_config.root)
5785                                 refs = atom_arg_map.get(atom_key)
5786                                 if refs is None:
5787                                         refs = []
5788                                         atom_arg_map[atom_key] = refs
5789                                 if arg not in refs:
5790                                         refs.append(arg)
5791
5792                 # Invalidate the package selection cache, since
5793                 # arguments influence package selections.
5794                 self._highest_pkg_cache.clear()
5795                 for trees in self._filtered_trees.itervalues():
5796                         trees["porttree"].dbapi._clear_cache()
5797
5798         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5799                 """
5800                 Return a list of slot atoms corresponding to installed slots that
5801                 differ from the slot of the highest visible match. When
5802                 blocker_lookahead is True, slot atoms that would trigger a blocker
5803                 conflict are automatically discarded, potentially allowing automatic
5804                 uninstallation of older slots when appropriate.
5805                 """
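                     # Illustrative example (values not from this file): for the atom
                     # 'dev-lang/python', with slot 2.6 as the highest visible match and
                     # an older installation in slot 2.5, the returned list would contain
                     # a slot atom like 'dev-lang/python:2.5'.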
5806                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5807                 if highest_pkg is None:
5808                         return []
5809                 vardb = root_config.trees["vartree"].dbapi
5810                 slots = set()
5811                 for cpv in vardb.match(atom):
5812                         # don't mix new virtuals with old virtuals
5813                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5814                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5815
5816                 slots.add(highest_pkg.metadata["SLOT"])
5817                 if len(slots) == 1:
5818                         return []
5819                 greedy_pkgs = []
5820                 slots.remove(highest_pkg.metadata["SLOT"])
5821                 while slots:
5822                         slot = slots.pop()
5823                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5824                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5825                         if pkg is not None and \
5826                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5827                                 greedy_pkgs.append(pkg)
5828                 if not greedy_pkgs:
5829                         return []
5830                 if not blocker_lookahead:
5831                         return [pkg.slot_atom for pkg in greedy_pkgs]
5832
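                     # Blocker lookahead: collect each candidate's blocker atoms from its
                     # dependency strings, discard any slot that the highest visible
                     # package blocks (or that blocks it), and resolve conflicts among
                     # the remaining candidates by keeping the higher version.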
5833                 blockers = {}
5834                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5835                 for pkg in greedy_pkgs + [highest_pkg]:
5836                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5837                         try:
5838                                 atoms = self._select_atoms(
5839                                         pkg.root, dep_str, pkg.use.enabled,
5840                                         parent=pkg, strict=True)
5841                         except portage.exception.InvalidDependString:
5842                                 continue
5843                         blocker_atoms = (x for x in atoms if x.blocker)
5844                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5845
5846                 if highest_pkg not in blockers:
5847                         return []
5848
5849                 # filter packages with invalid deps
5850                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5851
5852                 # filter packages that conflict with highest_pkg
5853                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5854                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5855                         blockers[pkg].findAtomForPackage(highest_pkg))]
5856
5857                 if not greedy_pkgs:
5858                         return []
5859
5860                 # If two packages conflict, discard the lower version.
5861                 discard_pkgs = set()
5862                 greedy_pkgs.sort(reverse=True)
5863                 for i in xrange(len(greedy_pkgs) - 1):
5864                         pkg1 = greedy_pkgs[i]
5865                         if pkg1 in discard_pkgs:
5866                                 continue
5867                         for j in xrange(i + 1, len(greedy_pkgs)):
5868                                 pkg2 = greedy_pkgs[j]
5869                                 if pkg2 in discard_pkgs:
5870                                         continue
5871                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5872                                         blockers[pkg2].findAtomForPackage(pkg1):
5873                                         # pkg1 > pkg2 (greedy_pkgs is sorted descending), so discard pkg2
5874                                         discard_pkgs.add(pkg2)
5875
5876                 return [pkg.slot_atom for pkg in greedy_pkgs \
5877                         if pkg not in discard_pkgs]
5878
5879         def _select_atoms_from_graph(self, *pargs, **kwargs):
5880                 """
5881                 Prefer atoms matching packages that have already been
5882                 added to the graph or those that are installed and have
5883                 not been scheduled for replacement.
5884                 """
5885                 kwargs["trees"] = self._graph_trees
5886                 return self._select_atoms_highest_available(*pargs, **kwargs)
5887
5888         def _select_atoms_highest_available(self, root, depstring,
5889                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5890                 """This will raise InvalidDependString if necessary. If trees is
5891                 None then self._filtered_trees is used."""
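                     # dep_check() collapses any-of groups, so for an illustrative
                     # depstring like '|| ( app-editors/vim app-editors/gvim )' the
                     # returned selected_atoms would contain a single choice such as
                     # 'app-editors/vim' (example values, not taken from this file).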
5892                 pkgsettings = self.pkgsettings[root]
5893                 if trees is None:
5894                         trees = self._filtered_trees
5895                 if not getattr(priority, "buildtime", False):
5896                         # The parent should only be passed to dep_check() for buildtime
5897                         # dependencies since that's the only case when it's appropriate
5898                         # to trigger the circular dependency avoidance code which uses it.
5899                         # It's important not to trigger the same circular dependency
5900                         # avoidance code for runtime dependencies since it's not needed
5901                         # and it can promote an incorrect package choice.
5902                         parent = None
5903                 if True:
5904                         try:
5905                                 if parent is not None:
5906                                         trees[root]["parent"] = parent
5907                                 if not strict:
5908                                         portage.dep._dep_check_strict = False
5909                                 mycheck = portage.dep_check(depstring, None,
5910                                         pkgsettings, myuse=myuse,
5911                                         myroot=root, trees=trees)
5912                         finally:
5913                                 if parent is not None:
5914                                         trees[root].pop("parent")
5915                                 portage.dep._dep_check_strict = True
5916                         if not mycheck[0]:
5917                                 raise portage.exception.InvalidDependString(mycheck[1])
5918                         selected_atoms = mycheck[1]
5919                 return selected_atoms
5920
5921         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5922                 atom = portage.dep.Atom(atom)
5923                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5924                 atom_without_use = atom
5925                 if atom.use:
5926                         atom_without_use = portage.dep.remove_slot(atom)
5927                         if atom.slot:
5928                                 atom_without_use += ":" + atom.slot
5929                         atom_without_use = portage.dep.Atom(atom_without_use)
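                     # The matching below uses atom_without_use so that candidates whose
                     # USE flags do not satisfy the original atom are still found and can
                     # be reported as missing-USE packages rather than silently skipped.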
5930                 xinfo = '"%s"' % atom
5931                 if arg:
5932                         xinfo = '"%s"' % arg
5933                 # Discard null/ from failed cpv_expand category expansion.
5934                 xinfo = xinfo.replace("null/", "")
5935                 masked_packages = []
5936                 missing_use = []
5937                 missing_licenses = []
5938                 have_eapi_mask = False
5939                 pkgsettings = self.pkgsettings[root]
5940                 implicit_iuse = pkgsettings._get_implicit_iuse()
5941                 root_config = self.roots[root]
5942                 portdb = self.roots[root].trees["porttree"].dbapi
5943                 dbs = self._filtered_trees[root]["dbs"]
5944                 for db, pkg_type, built, installed, db_keys in dbs:
5945                         if installed:
5946                                 continue
5947                         match = db.match
5948                         if hasattr(db, "xmatch"):
5949                                 cpv_list = db.xmatch("match-all", atom_without_use)
5950                         else:
5951                                 cpv_list = db.match(atom_without_use)
5952                         # descending order
5953                         cpv_list.reverse()
5954                         for cpv in cpv_list:
5955                                 metadata, mreasons = get_mask_info(root_config, cpv,
5956                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5957                                 if metadata is not None:
5958                                         pkg = Package(built=built, cpv=cpv,
5959                                                 installed=installed, metadata=metadata,
5960                                                 root_config=root_config)
5961                                         if pkg.cp != atom.cp:
5962                                                 # A cpv can be returned from dbapi.match() as an
5963                                                 # old-style virtual match even in cases when the
5964                                                 # package does not actually PROVIDE the virtual.
5965                                                 # Filter out any such false matches here.
5966                                                 if not atom_set.findAtomForPackage(pkg):
5967                                                         continue
5968                                         if atom.use and not mreasons:
5969                                                 missing_use.append(pkg)
5970                                                 continue
5971                                 masked_packages.append(
5972                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5973
5974                 missing_use_reasons = []
5975                 missing_iuse_reasons = []
5976                 for pkg in missing_use:
5977                         use = pkg.use.enabled
5978                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5979                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5980                         missing_iuse = []
5981                         for x in atom.use.required:
5982                                 if iuse_re.match(x) is None:
5983                                         missing_iuse.append(x)
5984                         mreasons = []
5985                         if missing_iuse:
5986                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5987                                 missing_iuse_reasons.append((pkg, mreasons))
5988                         else:
5989                                 need_enable = sorted(atom.use.enabled.difference(use))
5990                                 need_disable = sorted(atom.use.disabled.intersection(use))
5991                                 if need_enable or need_disable:
5992                                         changes = []
5993                                         changes.extend(colorize("red", "+" + x) \
5994                                                 for x in need_enable)
5995                                         changes.extend(colorize("blue", "-" + x) \
5996                                                 for x in need_disable)
5997                                         mreasons.append("Change USE: %s" % " ".join(changes))
5998                                         missing_use_reasons.append((pkg, mreasons))
5999
6000                 if missing_iuse_reasons and not missing_use_reasons:
6001                         missing_use_reasons = missing_iuse_reasons
6002                 elif missing_use_reasons:
6003                         # Only show the latest version.
6004                         del missing_use_reasons[1:]
6005
6006                 if missing_use_reasons:
6007                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6008                         print "!!! One of the following packages is required to complete your request:"
6009                         for pkg, mreasons in missing_use_reasons:
6010                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6011
6012                 elif masked_packages:
6013                         print "\n!!! " + \
6014                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6015                                 colorize("INFORM", xinfo) + \
6016                                 colorize("BAD", " have been masked.")
6017                         print "!!! One of the following masked packages is required to complete your request:"
6018                         have_eapi_mask = show_masked_packages(masked_packages)
6019                         if have_eapi_mask:
6020                                 print
6021                                 msg = ("The current version of portage supports " + \
6022                                         "EAPI '%s'. You must upgrade to a newer version" + \
6023                                         " of portage before EAPI masked packages can" + \
6024                                         " be installed.") % portage.const.EAPI
6025                                 from textwrap import wrap
6026                                 for line in wrap(msg, 75):
6027                                         print line
6028                         print
6029                         show_mask_docs()
6030                 else:
6031                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6032
6033                 # Show parent nodes and the argument that pulled them in.
6034                 traversed_nodes = set()
6035                 node = myparent
6036                 msg = []
6037                 while node is not None:
6038                         traversed_nodes.add(node)
6039                         msg.append('(dependency required by "%s" [%s])' % \
6040                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6041                         # When traversing to parents, prefer arguments over packages
6042                         # since arguments are root nodes. Never traverse the same
6043                         # package twice, in order to prevent an infinite loop.
6044                         selected_parent = None
6045                         for parent in self.digraph.parent_nodes(node):
6046                                 if isinstance(parent, DependencyArg):
6047                                         msg.append('(dependency required by "%s" [argument])' % \
6048                                                 (colorize('INFORM', str(parent))))
6049                                         selected_parent = None
6050                                         break
6051                                 if parent not in traversed_nodes:
6052                                         selected_parent = parent
6053                         node = selected_parent
6054                 for line in msg:
6055                         print line
6056
6057                 print
6058
6059         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6060                 cache_key = (root, atom, onlydeps)
6061                 ret = self._highest_pkg_cache.get(cache_key)
6062                 if ret is not None:
6063                         pkg, existing = ret
6064                         if pkg and not existing:
6065                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6066                                 if existing and existing == pkg:
6067                                         # Update the cache to reflect that the
6068                                         # package has been added to the graph.
6069                                         ret = pkg, pkg
6070                                         self._highest_pkg_cache[cache_key] = ret
6071                         return ret
6072                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6073                 self._highest_pkg_cache[cache_key] = ret
6074                 pkg, existing = ret
6075                 if pkg is not None:
6076                         settings = pkg.root_config.settings
6077                         if visible(settings, pkg) and not (pkg.installed and \
6078                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6079                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6080                 return ret
6081
6082         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6083                 root_config = self.roots[root]
6084                 pkgsettings = self.pkgsettings[root]
6085                 dbs = self._filtered_trees[root]["dbs"]
6086                 vardb = self.roots[root].trees["vartree"].dbapi
6087                 portdb = self.roots[root].trees["porttree"].dbapi
6088                 # List of acceptable packages, ordered by type preference.
6089                 matched_packages = []
6090                 highest_version = None
6091                 if not isinstance(atom, portage.dep.Atom):
6092                         atom = portage.dep.Atom(atom)
6093                 atom_cp = atom.cp
6094                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6095                 existing_node = None
6096                 myeb = None
6097                 usepkgonly = "--usepkgonly" in self.myopts
6098                 empty = "empty" in self.myparams
6099                 selective = "selective" in self.myparams
6100                 reinstall = False
6101                 noreplace = "--noreplace" in self.myopts
6102                 # Behavior of the "selective" parameter depends on
6103                 # whether or not a package matches an argument atom.
6104                 # If an installed package provides an old-style
6105                 # virtual that is no longer provided by an available
6106                 # package, the installed package may match an argument
6107                 # atom even though none of the available packages do.
6108                 # Therefore, "selective" logic does not consider
6109                 # whether or not an installed package matches an
6110                 # argument atom. It only considers whether or not
6111                 # available packages match argument atoms, which is
6112                 # represented by the found_available_arg flag.
6113                 found_available_arg = False
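                     # Two passes over the candidate databases: the first pass (with
                     # find_existing_node True) prefers a package that already occupies
                     # the matching slot in the graph; only if none is found does the
                     # second pass select the highest available match.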
6114                 for find_existing_node in True, False:
6115                         if existing_node:
6116                                 break
6117                         for db, pkg_type, built, installed, db_keys in dbs:
6118                                 if existing_node:
6119                                         break
6120                                 if installed and not find_existing_node:
6121                                         want_reinstall = reinstall or empty or \
6122                                                 (found_available_arg and not selective)
6123                                         if want_reinstall and matched_packages:
6124                                                 continue
6125                                 if hasattr(db, "xmatch"):
6126                                         cpv_list = db.xmatch("match-all", atom)
6127                                 else:
6128                                         cpv_list = db.match(atom)
6129
6130                                 # USE=multislot can make an installed package appear as if
6131                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6132                                 # won't do any good as long as USE=multislot is enabled since
6133                                 # the newly built package still won't have the expected slot.
6134                                 # Therefore, assume that such SLOT dependencies are already
6135                                 # satisfied rather than forcing a rebuild.
6136                                 if installed and not cpv_list and atom.slot:
6137                                         for cpv in db.match(atom.cp):
6138                                                 slot_available = False
6139                                                 for other_db, other_type, other_built, \
6140                                                         other_installed, other_keys in dbs:
6141                                                         try:
6142                                                                 if atom.slot == \
6143                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6144                                                                         slot_available = True
6145                                                                         break
6146                                                         except KeyError:
6147                                                                 pass
6148                                                 if not slot_available:
6149                                                         continue
6150                                                 inst_pkg = self._pkg(cpv, "installed",
6151                                                         root_config, installed=installed)
6152                                                 # Remove the slot from the atom and verify that
6153                                                 # the package matches the resulting atom.
6154                                                 atom_without_slot = portage.dep.remove_slot(atom)
6155                                                 if atom.use:
6156                                                         atom_without_slot += str(atom.use)
6157                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6158                                                 if portage.match_from_list(
6159                                                         atom_without_slot, [inst_pkg]):
6160                                                         cpv_list = [inst_pkg.cpv]
6161                                                 break
6162
6163                                 if not cpv_list:
6164                                         continue
6165                                 pkg_status = "merge"
6166                                 if installed or onlydeps:
6167                                         pkg_status = "nomerge"
6168                                 # descending order
6169                                 cpv_list.reverse()
6170                                 for cpv in cpv_list:
6171                                         # Make --noreplace take precedence over --newuse.
6172                                         if not installed and noreplace and \
6173                                                 cpv in vardb.match(atom):
6174                                                 # If the installed version is masked, it may
6175                                                 # be necessary to look at lower versions,
6176                                                 # in case there is a visible downgrade.
6177                                                 continue
6178                                         reinstall_for_flags = None
6179                                         cache_key = (pkg_type, root, cpv, pkg_status)
6180                                         calculated_use = True
6181                                         pkg = self._pkg_cache.get(cache_key)
6182                                         if pkg is None:
6183                                                 calculated_use = False
6184                                                 try:
6185                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6186                                                 except KeyError:
6187                                                         continue
6188                                                 pkg = Package(built=built, cpv=cpv,
6189                                                         installed=installed, metadata=metadata,
6190                                                         onlydeps=onlydeps, root_config=root_config,
6191                                                         type_name=pkg_type)
6192                                                 metadata = pkg.metadata
6193                                                 if not built:
6194                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6195                                                 if not built and ("?" in metadata["LICENSE"] or \
6196                                                         "?" in metadata["PROVIDE"]):
6197                                                         # This is avoided whenever possible because
6198                                                         # it's expensive. It only needs to be done here
6199                                                         # if it has an effect on visibility.
6200                                                         pkgsettings.setcpv(pkg)
6201                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6202                                                         calculated_use = True
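                                             # A Package hashes like the (pkg_type, root, cpv,
                                             # pkg_status) tuple used as cache_key above, so the
                                             # instance itself serves as the key here.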
6203                                                 self._pkg_cache[pkg] = pkg
6204
6205                                         if not installed or (built and matched_packages):
6206                                                 # Only enforce visibility on installed packages
6207                                                 # if there is at least one other visible package
6208                                                 # available. By filtering installed masked packages
6209                                                 # here, packages that have been masked since they
6210                                                 # were installed can be automatically downgraded
6211                                                 # to an unmasked version.
6212                                                 try:
6213                                                         if not visible(pkgsettings, pkg):
6214                                                                 continue
6215                                                 except portage.exception.InvalidDependString:
6216                                                         if not installed:
6217                                                                 continue
6218
6219                                                 # Enable upgrade or downgrade to a version
6220                                                 # with visible KEYWORDS when the installed
6221                                                 # version is masked by KEYWORDS, but never
6222                                                 # reinstall the same exact version only due
6223                                                 # to a KEYWORDS mask.
6224                                                 if built and matched_packages:
6225
6226                                                         different_version = None
6227                                                         for avail_pkg in matched_packages:
6228                                                                 if not portage.dep.cpvequal(
6229                                                                         pkg.cpv, avail_pkg.cpv):
6230                                                                         different_version = avail_pkg
6231                                                                         break
6232                                                         if different_version is not None:
6233
6234                                                                 if installed and \
6235                                                                         pkgsettings._getMissingKeywords(
6236                                                                         pkg.cpv, pkg.metadata):
6237                                                                         continue
6238
6239                                                                 # If the ebuild no longer exists or its
6240                                                                 # keywords have been dropped, reject built
6241                                                                 # instances (installed or binary).
6242                                                                 # If --usepkgonly is enabled, assume that
6243                                                                 # the ebuild status should be ignored.
6244                                                                 if not usepkgonly:
6245                                                                         try:
6246                                                                                 pkg_eb = self._pkg(
6247                                                                                         pkg.cpv, "ebuild", root_config)
6248                                                                         except portage.exception.PackageNotFound:
6249                                                                                 continue
6250                                                                         else:
6251                                                                                 if not visible(pkgsettings, pkg_eb):
6252                                                                                         continue
6253
6254                                         if not pkg.built and not calculated_use:
6255                                                 # This is avoided whenever possible because
6256                                                 # it's expensive.
6257                                                 pkgsettings.setcpv(pkg)
6258                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6259
6260                                         if pkg.cp != atom.cp:
6261                                                 # A cpv can be returned from dbapi.match() as an
6262                                                 # old-style virtual match even in cases when the
6263                                                 # package does not actually PROVIDE the virtual.
6264                                                 # Filter out any such false matches here.
6265                                                 if not atom_set.findAtomForPackage(pkg):
6266                                                         continue
6267
6268                                         myarg = None
6269                                         if root == self.target_root:
6270                                                 try:
6271                                                         # Ebuild USE must have been calculated prior
6272                                                         # to this point, in case atoms have USE deps.
6273                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6274                                                 except StopIteration:
6275                                                         pass
6276                                                 except portage.exception.InvalidDependString:
6277                                                         if not installed:
6278                                                                 # masked by corruption
6279                                                                 continue
6280                                         if not installed and myarg:
6281                                                 found_available_arg = True
6282
6283                                         if atom.use and not pkg.built:
6284                                                 use = pkg.use.enabled
6285                                                 if atom.use.enabled.difference(use):
6286                                                         continue
6287                                                 if atom.use.disabled.intersection(use):
6288                                                         continue
6289                                         if pkg.cp == atom_cp:
6290                                                 if highest_version is None:
6291                                                         highest_version = pkg
6292                                                 elif pkg > highest_version:
6293                                                         highest_version = pkg
6294                                         # At this point, we've found the highest visible
6295                                         # match from the current repo. Any lower versions
6296                                         # from this repo are ignored, so the loop
6297                                         # will always end with a break statement below
6298                                         # this point.
6299                                         if find_existing_node:
6300                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6301                                                 if not e_pkg:
6302                                                         break
6303                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6304                                                         if highest_version and \
6305                                                                 e_pkg.cp == atom_cp and \
6306                                                                 e_pkg < highest_version and \
6307                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6308                                                                 # There is a higher version available in a
6309                                                                 # different slot, so this existing node is
6310                                                                 # irrelevant.
6311                                                                 pass
6312                                                         else:
6313                                                                 matched_packages.append(e_pkg)
6314                                                                 existing_node = e_pkg
6315                                                 break
6316                                         # Compare built package to current config and
6317                                         # reject the built package if necessary.
6318                                         if built and not installed and \
6319                                                 ("--newuse" in self.myopts or \
6320                                                 "--reinstall" in self.myopts):
6321                                                 iuses = pkg.iuse.all
6322                                                 old_use = pkg.use.enabled
6323                                                 if myeb:
6324                                                         pkgsettings.setcpv(myeb)
6325                                                 else:
6326                                                         pkgsettings.setcpv(pkg)
6327                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6328                                                 forced_flags = set()
6329                                                 forced_flags.update(pkgsettings.useforce)
6330                                                 forced_flags.update(pkgsettings.usemask)
6331                                                 cur_iuse = iuses
6332                                                 if myeb and not usepkgonly:
6333                                                         cur_iuse = myeb.iuse.all
6334                                                 if self._reinstall_for_flags(forced_flags,
6335                                                         old_use, iuses,
6336                                                         now_use, cur_iuse):
6337                                                         break
6338                                         # Compare current config to installed package
6339                                         # and do not reinstall if possible.
6340                                         if not installed and \
6341                                                 ("--newuse" in self.myopts or \
6342                                                 "--reinstall" in self.myopts) and \
6343                                                 cpv in vardb.match(atom):
6344                                                 pkgsettings.setcpv(pkg)
6345                                                 forced_flags = set()
6346                                                 forced_flags.update(pkgsettings.useforce)
6347                                                 forced_flags.update(pkgsettings.usemask)
6348                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6349                                                 old_iuse = set(filter_iuse_defaults(
6350                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6351                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6352                                                 cur_iuse = pkg.iuse.all
6353                                                 reinstall_for_flags = \
6354                                                         self._reinstall_for_flags(
6355                                                         forced_flags, old_use, old_iuse,
6356                                                         cur_use, cur_iuse)
6357                                                 if reinstall_for_flags:
6358                                                         reinstall = True
6359                                         if not built:
6360                                                 myeb = pkg
6361                                         matched_packages.append(pkg)
6362                                         if reinstall_for_flags:
6363                                                 self._reinstall_nodes[pkg] = \
6364                                                         reinstall_for_flags
6365                                         break
6366
6367                 if not matched_packages:
6368                         return None, None
6369
6370                 if "--debug" in self.myopts:
6371                         for pkg in matched_packages:
6372                                 portage.writemsg("%s %s\n" % \
6373                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6374
6375                 # Filter out any old-style virtual matches if they are
6376                 # mixed with new-style virtual matches.
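                     # Illustrative example (values not from this file): if the atom is
                     # 'virtual/ssh' and the matches include both a new-style virtual/ssh
                     # package and an old-style provider such as net-misc/openssh, only
                     # the new-style matches are kept.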
6377                 cp = portage.dep_getkey(atom)
6378                 if len(matched_packages) > 1 and \
6379                         "virtual" == portage.catsplit(cp)[0]:
6380                         for pkg in matched_packages:
6381                                 if pkg.cp != cp:
6382                                         continue
6383                                 # Got a new-style virtual, so filter
6384                                 # out any old-style virtuals.
6385                                 matched_packages = [pkg for pkg in matched_packages \
6386                                         if pkg.cp == cp]
6387                                 break
6388
6389                 if len(matched_packages) > 1:
6390                         bestmatch = portage.best(
6391                                 [pkg.cpv for pkg in matched_packages])
6392                         matched_packages = [pkg for pkg in matched_packages \
6393                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6394
6395                 # ordered by type preference ("ebuild" type is the last resort)
6396                 return matched_packages[-1], existing_node
6397
6398         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6399                 """
6400                 Select packages that have already been added to the graph or
6401                 those that are installed and have not been scheduled for
6402                 replacement.
6403                 """
6404                 graph_db = self._graph_trees[root]["porttree"].dbapi
6405                 matches = graph_db.match_pkgs(atom)
6406                 if not matches:
6407                         return None, None
6408                 pkg = matches[-1] # highest match
6409                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6410                 return pkg, in_graph
6411
6412         def _complete_graph(self):
6413                 """
6414                 Add any deep dependencies of required sets (args, system, world) that
6415                 have not been pulled into the graph yet. This ensures that the graph
6416                 is consistent such that initially satisfied deep dependencies are not
6417                 broken in the new graph. Initially unsatisfied dependencies are
6418                 irrelevant since we only want to avoid breaking dependencies that are
6419                 initially satisfied.
6420
6421                 Since this method can consume enough time to disturb users, it is
6422                 currently only enabled by the --complete-graph option.
6423                 """
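                     # Outline: temporarily restrict package selection to graph members and
                     # unreplaced installed packages, force "deep" traversal, re-add the
                     # required sets (args, system, world) as dependencies, and then check
                     # whether any initially satisfied dependency would become unsatisfied.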
6424                 if "--buildpkgonly" in self.myopts or \
6425                         "recurse" not in self.myparams:
6426                         return 1
6427
6428                 if "complete" not in self.myparams:
6429                         # Skip this to avoid consuming enough time to disturb users.
6430                         return 1
6431
6432                 # Put the depgraph into a mode that causes it to only
6433                 # select packages that have already been added to the
6434                 # graph or those that are installed and have not been
6435                 # scheduled for replacement. Also, toggle the "deep"
6436                 # parameter so that all dependencies are traversed and
6437                 # accounted for.
6438                 self._select_atoms = self._select_atoms_from_graph
6439                 self._select_package = self._select_pkg_from_graph
6440                 already_deep = "deep" in self.myparams
6441                 if not already_deep:
6442                         self.myparams.add("deep")
6443
6444                 for root in self.roots:
6445                         required_set_names = self._required_set_names.copy()
6446                         if root == self.target_root and \
6447                                 (already_deep or "empty" in self.myparams):
6448                                 required_set_names.difference_update(self._sets)
6449                         if not required_set_names and not self._ignored_deps:
6450                                 continue
6451                         root_config = self.roots[root]
6452                         setconfig = root_config.setconfig
6453                         args = []
6454                         # Reuse existing SetArg instances when available.
6455                         for arg in self.digraph.root_nodes():
6456                                 if not isinstance(arg, SetArg):
6457                                         continue
6458                                 if arg.root_config != root_config:
6459                                         continue
6460                                 if arg.name in required_set_names:
6461                                         args.append(arg)
6462                                         required_set_names.remove(arg.name)
6463                         # Create new SetArg instances only when necessary.
6464                         for s in required_set_names:
6465                                 expanded_set = InternalPackageSet(
6466                                         initial_atoms=setconfig.getSetAtoms(s))
6467                                 atom = SETPREFIX + s
6468                                 args.append(SetArg(arg=atom, set=expanded_set,
6469                                         root_config=root_config))
6470                         vardb = root_config.trees["vartree"].dbapi
6471                         for arg in args:
6472                                 for atom in arg.set:
6473                                         self._dep_stack.append(
6474                                                 Dependency(atom=atom, root=root, parent=arg))
6475                         if self._ignored_deps:
6476                                 self._dep_stack.extend(self._ignored_deps)
6477                                 self._ignored_deps = []
6478                         if not self._create_graph(allow_unsatisfied=True):
6479                                 return 0
6480                         # Check the unsatisfied deps to see if any initially satisfied deps
6481                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6482                         # deps are irrelevant since we only want to avoid breaking deps
6483                         # that are initially satisfied.
6484                         while self._unsatisfied_deps:
6485                                 dep = self._unsatisfied_deps.pop()
6486                                 matches = vardb.match_pkgs(dep.atom)
6487                                 if not matches:
6488                                         self._initially_unsatisfied_deps.append(dep)
6489                                         continue
6490                                 # A scheduled installation broke a deep dependency.
6491                                 # Add the installed package to the graph so that it
6492                                 # will be appropriately reported as a slot collision
6493                                 # (possibly solvable via backtracking).
6494                                 pkg = matches[-1] # highest match
6495                                 if not self._add_pkg(pkg, dep):
6496                                         return 0
6497                                 if not self._create_graph(allow_unsatisfied=True):
6498                                         return 0
6499                 return 1
6500
6501         def _pkg(self, cpv, type_name, root_config, installed=False):
6502                 """
6503                 Get a package instance from the cache, or create a new
6504                 one if necessary. Raises PackageNotFound if aux_get
6505                 fails for some reason (package does not exist or is
6506                 corrupt).
6507                 """
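                     # Illustrative example (hypothetical cpv; the key layout
                     # mirrors the lookup below): a second call with the same
                     # arguments returns the cached Package instance.
                     #
                     #     pkg = self._pkg("sys-apps/portage-2.2", "ebuild",
                     #             root_config)
                     #     # cached under ("ebuild", root_config.root,
                     #     #               "sys-apps/portage-2.2", "merge")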
6508                 operation = "merge"
6509                 if installed:
6510                         operation = "nomerge"
6511                 pkg = self._pkg_cache.get(
6512                         (type_name, root_config.root, cpv, operation))
6513                 if pkg is None:
6514                         tree_type = self.pkg_tree_map[type_name]
6515                         db = root_config.trees[tree_type].dbapi
6516                         db_keys = list(self._trees_orig[root_config.root][
6517                                 tree_type].dbapi._aux_cache_keys)
6518                         try:
6519                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6520                         except KeyError:
6521                                 raise portage.exception.PackageNotFound(cpv)
6522                         pkg = Package(cpv=cpv, metadata=metadata,
6523                                 root_config=root_config, installed=installed)
6524                         if type_name == "ebuild":
6525                                 settings = self.pkgsettings[root_config.root]
6526                                 settings.setcpv(pkg)
6527                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6528                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6529                         self._pkg_cache[pkg] = pkg
6530                 return pkg
6531
6532         def validate_blockers(self):
6533                 """Remove any blockers from the digraph that do not match any of the
6534                 packages within the graph.  If necessary, create hard deps to ensure
6535                 correct merge order such that mutually blocking packages are never
6536                 installed simultaneously."""
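                     # Illustrative sketch (hypothetical blocker atom; mirrors the
                     # Blocker construction further below): an installed package
                     # whose *DEPEND contains "!<app-misc/foo-2" contributes
                     #
                     #     blocker = Blocker(atom=portage.dep.Atom("!<app-misc/foo-2"),
                     #             eapi=pkg.metadata["EAPI"], root=myroot)
                     #     self._blocker_parents.add(blocker, pkg)
                     #
                     # and any match for that atom in the final graph must then be
                     # replaced or uninstalled before the blocking parent is merged.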
6537
6538                 if "--buildpkgonly" in self.myopts or \
6539                         "--nodeps" in self.myopts:
6540                         return True
6541
6542                 #if "deep" in self.myparams:
6543                 if True:
6544                         # Pull in blockers from all installed packages that haven't already
6545                         # been pulled into the depgraph. This is now done unconditionally,
6546                         # despite the performance penalty that is incurred by all the
6547                         # additional dep_check calls that are required.
6548
6549                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6550                         for myroot in self.trees:
6551                                 vardb = self.trees[myroot]["vartree"].dbapi
6552                                 portdb = self.trees[myroot]["porttree"].dbapi
6553                                 pkgsettings = self.pkgsettings[myroot]
6554                                 final_db = self.mydbapi[myroot]
6555
6556                                 blocker_cache = BlockerCache(myroot, vardb)
6557                                 stale_cache = set(blocker_cache)
6558                                 for pkg in vardb:
6559                                         cpv = pkg.cpv
6560                                         stale_cache.discard(cpv)
6561                                         pkg_in_graph = self.digraph.contains(pkg)
6562
6563                                         # Check for masked installed packages. Only warn about
6564                                         # packages that are in the graph in order to avoid warning
6565                                         # about those that will be automatically uninstalled during
6566                                         # the merge process or by --depclean.
6567                                         if pkg in final_db:
6568                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6569                                                         self._masked_installed.add(pkg)
6570
6571                                         blocker_atoms = None
6572                                         blockers = None
6573                                         if pkg_in_graph:
6574                                                 blockers = []
6575                                                 try:
6576                                                         blockers.extend(
6577                                                                 self._blocker_parents.child_nodes(pkg))
6578                                                 except KeyError:
6579                                                         pass
6580                                                 try:
6581                                                         blockers.extend(
6582                                                                 self._irrelevant_blockers.child_nodes(pkg))
6583                                                 except KeyError:
6584                                                         pass
6585                                         if blockers is not None:
6586                                                 blockers = set(str(blocker.atom) \
6587                                                         for blocker in blockers)
6588
6589                                         # If this node has any blockers, create a "nomerge"
6590                                         # node for it so that they can be enforced.
6591                                         self.spinner.update()
6592                                         blocker_data = blocker_cache.get(cpv)
6593                                         if blocker_data is not None and \
6594                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6595                                                 blocker_data = None
6596
6597                                         # If blocker data from the graph is available, use
6598                                         # it to validate the cache and update the cache if
6599                                         # it seems invalid.
6600                                         if blocker_data is not None and \
6601                                                 blockers is not None:
6602                                                 if not blockers.symmetric_difference(
6603                                                         blocker_data.atoms):
6604                                                         continue
6605                                                 blocker_data = None
6606
6607                                         if blocker_data is None and \
6608                                                 blockers is not None:
6609                                                 # Re-use the blockers from the graph.
6610                                                 blocker_atoms = sorted(blockers)
6611                                                 counter = long(pkg.metadata["COUNTER"])
6612                                                 blocker_data = \
6613                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6614                                                 blocker_cache[pkg.cpv] = blocker_data
6615                                                 continue
6616
6617                                         if blocker_data:
6618                                                 blocker_atoms = blocker_data.atoms
6619                                         else:
6620                                                 # Use aux_get() to trigger FakeVartree global
6621                                                 # updates on *DEPEND when appropriate.
6622                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6623                                                 # It is crucial to pass in final_db here in order to
6624                                                 # optimize dep_check calls by eliminating atoms via
6625                                                 # dep_wordreduce and dep_eval calls.
6626                                                 try:
6627                                                         portage.dep._dep_check_strict = False
6628                                                         try:
6629                                                                 success, atoms = portage.dep_check(depstr,
6630                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6631                                                                         trees=self._graph_trees, myroot=myroot)
6632                                                         except Exception, e:
6633                                                                 if isinstance(e, SystemExit):
6634                                                                         raise
6635                                                                 # This is helpful, for example, if a ValueError
6636                                                                 # is thrown from cpv_expand due to multiple
6637                                                                 # matches (this can happen if an atom lacks a
6638                                                                 # category).
6639                                                                 show_invalid_depstring_notice(
6640                                                                         pkg, depstr, str(e))
6641                                                                 del e
6642                                                                 raise
6643                                                 finally:
6644                                                         portage.dep._dep_check_strict = True
6645                                                 if not success:
6646                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6647                                                         if replacement_pkg and \
6648                                                                 replacement_pkg[0].operation == "merge":
6649                                                                 # This package is being replaced anyway, so
6650                                                                 # ignore invalid dependencies so as not to
6651                                                                 # annoy the user too much (otherwise they'd be
6652                                                                 # forced to manually unmerge it first).
6653                                                                 continue
6654                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6655                                                         return False
6656                                                 blocker_atoms = [myatom for myatom in atoms \
6657                                                         if myatom.startswith("!")]
6658                                                 blocker_atoms.sort()
6659                                                 counter = long(pkg.metadata["COUNTER"])
6660                                                 blocker_cache[cpv] = \
6661                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6662                                         if blocker_atoms:
6663                                                 try:
6664                                                         for atom in blocker_atoms:
6665                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6666                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6667                                                                 self._blocker_parents.add(blocker, pkg)
6668                                                 except portage.exception.InvalidAtom, e:
6669                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6670                                                         show_invalid_depstring_notice(
6671                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6672                                                         return False
6673                                 for cpv in stale_cache:
6674                                         del blocker_cache[cpv]
6675                                 blocker_cache.flush()
6676                                 del blocker_cache
6677
6678                 # Discard any "uninstall" tasks scheduled by previous calls
6679                 # to this method, since those tasks may not make sense given
6680                 # the current graph state.
6681                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6682                 if previous_uninstall_tasks:
6683                         self._blocker_uninstalls = digraph()
6684                         self.digraph.difference_update(previous_uninstall_tasks)
6685
6686                 for blocker in self._blocker_parents.leaf_nodes():
6687                         self.spinner.update()
6688                         root_config = self.roots[blocker.root]
6689                         virtuals = root_config.settings.getvirtuals()
6690                         myroot = blocker.root
6691                         initial_db = self.trees[myroot]["vartree"].dbapi
6692                         final_db = self.mydbapi[myroot]
6693
6694                         provider_virtual = False
6695                         if blocker.cp in virtuals and \
6696                                 not self._have_new_virt(blocker.root, blocker.cp):
6697                                 provider_virtual = True
6698
6699                         if provider_virtual:
6700                                 atoms = []
6701                                 for provider_entry in virtuals[blocker.cp]:
6702                                         provider_cp = \
6703                                                 portage.dep_getkey(provider_entry)
6704                                         atoms.append(blocker.atom.replace(
6705                                                 blocker.cp, provider_cp))
6706                         else:
6707                                 atoms = [blocker.atom]
6708
6709                         blocked_initial = []
6710                         for atom in atoms:
6711                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6712
6713                         blocked_final = []
6714                         for atom in atoms:
6715                                 blocked_final.extend(final_db.match_pkgs(atom))
6716
6717                         if not blocked_initial and not blocked_final:
6718                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6719                                 self._blocker_parents.remove(blocker)
6720                                 # Discard any parents that don't have any more blockers.
6721                                 for pkg in parent_pkgs:
6722                                         self._irrelevant_blockers.add(blocker, pkg)
6723                                         if not self._blocker_parents.child_nodes(pkg):
6724                                                 self._blocker_parents.remove(pkg)
6725                                 continue
6726                         for parent in self._blocker_parents.parent_nodes(blocker):
6727                                 unresolved_blocks = False
6728                                 depends_on_order = set()
6729                                 for pkg in blocked_initial:
6730                                         if pkg.slot_atom == parent.slot_atom:
6731                                                 # TODO: Support blocks within slots in cases where it
6732                                                 # might make sense.  For example, a new version might
6733                                                 # require that the old version be uninstalled at build
6734                                                 # time.
6735                                                 continue
6736                                         if parent.installed:
6737                                                 # Two currently installed packages conflict with
6738                                                 # each other. Ignore this case since the damage
6739                                                 # is already done and this would be likely to
6740                                                 # confuse users if displayed like a normal blocker.
6741                                                 continue
6742
6743                                         self._blocked_pkgs.add(pkg, blocker)
6744
6745                                         if parent.operation == "merge":
6746                                                 # Maybe the blocked package can be replaced or simply
6747                                                 # unmerged to resolve this block.
6748                                                 depends_on_order.add((pkg, parent))
6749                                                 continue
6750                                         # None of the above blocker resolution techniques apply,
6751                                         # so apparently this one is unresolvable.
6752                                         unresolved_blocks = True
6753                                 for pkg in blocked_final:
6754                                         if pkg.slot_atom == parent.slot_atom:
6755                                                 # TODO: Support blocks within slots.
6756                                                 continue
6757                                         if parent.operation == "nomerge" and \
6758                                                 pkg.operation == "nomerge":
6759                                                 # This blocker will be handled the next time that a
6760                                                 # merge of either package is triggered.
6761                                                 continue
6762
6763                                         self._blocked_pkgs.add(pkg, blocker)
6764
6765                                         # Maybe the blocking package can be
6766                                         # unmerged to resolve this block.
6767                                         if parent.operation == "merge" and pkg.installed:
6768                                                 depends_on_order.add((pkg, parent))
6769                                                 continue
6770                                         elif parent.operation == "nomerge":
6771                                                 depends_on_order.add((parent, pkg))
6772                                                 continue
6773                                         # None of the above blocker resolution techniques apply,
6774                                         # so apparently this one is unresolvable.
6775                                         unresolved_blocks = True
6776
6777                                 # Make sure we don't unmerge any packages that have been pulled
6778                                 # into the graph.
6779                                 if not unresolved_blocks and depends_on_order:
6780                                         for inst_pkg, inst_task in depends_on_order:
6781                                                 if self.digraph.contains(inst_pkg) and \
6782                                                         self.digraph.parent_nodes(inst_pkg):
6783                                                         unresolved_blocks = True
6784                                                         break
6785
6786                                 if not unresolved_blocks and depends_on_order:
6787                                         for inst_pkg, inst_task in depends_on_order:
6788                                                 uninst_task = Package(built=inst_pkg.built,
6789                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6790                                                         metadata=inst_pkg.metadata,
6791                                                         operation="uninstall",
6792                                                         root_config=inst_pkg.root_config,
6793                                                         type_name=inst_pkg.type_name)
6794                                                 self._pkg_cache[uninst_task] = uninst_task
6795                                                 # Enforce correct merge order with a hard dep.
6796                                                 self.digraph.addnode(uninst_task, inst_task,
6797                                                         priority=BlockerDepPriority.instance)
6798                                                 # Count references to this blocker so that it can be
6799                                                 # invalidated after nodes referencing it have been
6800                                                 # merged.
6801                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6802                                 if not unresolved_blocks and not depends_on_order:
6803                                         self._irrelevant_blockers.add(blocker, parent)
6804                                         self._blocker_parents.remove_edge(blocker, parent)
6805                                         if not self._blocker_parents.parent_nodes(blocker):
6806                                                 self._blocker_parents.remove(blocker)
6807                                         if not self._blocker_parents.child_nodes(parent):
6808                                                 self._blocker_parents.remove(parent)
6809                                 if unresolved_blocks:
6810                                         self._unsolvable_blockers.add(blocker, parent)
6811
6812                 return True
6813
6814         def _accept_blocker_conflicts(self):
6815                 acceptable = False
6816                 for x in ("--buildpkgonly", "--fetchonly",
6817                         "--fetch-all-uri", "--nodeps"):
6818                         if x in self.myopts:
6819                                 acceptable = True
6820                                 break
6821                 return acceptable
6822
6823         def _merge_order_bias(self, mygraph):
6824                 """
6825                 For optimal leaf node selection, promote deep system runtime deps and
6826                 order nodes from highest to lowest overall reference count.
6827                 """
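                     # Worked example (hypothetical nodes): given A with three
                     # parents, B with one parent but a deep system runtime dep,
                     # and an uninstall task U, the comparator below yields
                     #
                     #     mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
                     #     # -> [B, A, U]: system deps first, then higher
                     #     #    reference counts, uninstalls always last.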
6828
6829                 node_info = {}
6830                 for node in mygraph.order:
6831                         node_info[node] = len(mygraph.parent_nodes(node))
6832                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6833
6834                 def cmp_merge_preference(node1, node2):
6835
6836                         if node1.operation == 'uninstall':
6837                                 if node2.operation == 'uninstall':
6838                                         return 0
6839                                 return 1
6840
6841                         if node2.operation == 'uninstall':
6842                                 if node1.operation == 'uninstall':
6843                                         return 0
6844                                 return -1
6845
6846                         node1_sys = node1 in deep_system_deps
6847                         node2_sys = node2 in deep_system_deps
6848                         if node1_sys != node2_sys:
6849                                 if node1_sys:
6850                                         return -1
6851                                 return 1
6852
6853                         return node_info[node2] - node_info[node1]
6854
6855                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6856
6857         def altlist(self, reversed=False):
6858
6859                 while self._serialized_tasks_cache is None:
6860                         self._resolve_conflicts()
6861                         try:
6862                                 self._serialized_tasks_cache, self._scheduler_graph = \
6863                                         self._serialize_tasks()
6864                         except self._serialize_tasks_retry:
6865                                 pass
6866
6867                 retlist = self._serialized_tasks_cache[:]
6868                 if reversed:
6869                         retlist.reverse()
6870                 return retlist
6871
6872         def schedulerGraph(self):
6873                 """
6874                 The scheduler graph is identical to the normal one except that
6875                 uninstall edges are reversed in specific cases that require
6876                 conflicting packages to be temporarily installed simultaneously.
6877                 This is intended for use by the Scheduler in its parallelization
6878                 logic. It ensures that temporary simultaneous installation of
6879                 conflicting packages is avoided when appropriate (especially for
6880                 !!atom blockers), but allowed in specific cases that require it.
6881
6882                 Note that this method calls break_refs() which alters the state of
6883                 internal Package instances such that this depgraph instance should
6884                 not be used to perform any more calculations.
6885                 """
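                     # Typical usage sketch (hypothetical external caller):
                     #
                     #     sched_graph = depgraph.schedulerGraph()
                     #     # depgraph must not be reused for further calculations,
                     #     # since break_refs() has detached its Package instances.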
6886                 if self._scheduler_graph is None:
6887                         self.altlist()
6888                 self.break_refs(self._scheduler_graph.order)
6889                 return self._scheduler_graph
6890
6891         def break_refs(self, nodes):
6892                 """
6893                 Take a mergelist like that returned from self.altlist() and
6894                 break any references that lead back to the depgraph. This is
6895                 useful if you want to hold references to packages without
6896                 also holding the depgraph on the heap.
6897                 """
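                     # Minimal usage sketch (hypothetical caller): keep a mergelist
                     # alive without pinning the whole depgraph on the heap.
                     #
                     #     mergelist = depgraph.altlist()
                     #     depgraph.break_refs(mergelist)
                     #     del depgraph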
6898                 for node in nodes:
6899                         if hasattr(node, "root_config"):
6900                                 # The FakeVartree references the _package_cache which
6901                                 # references the depgraph. So that Package instances don't
6902                                 # hold the depgraph and FakeVartree on the heap, replace
6903                                 # the RootConfig that references the FakeVartree with the
6904                                 # original RootConfig instance which references the actual
6905                                 # vartree.
6906                                 node.root_config = \
6907                                         self._trees_orig[node.root_config.root]["root_config"]
6908
6909         def _resolve_conflicts(self):
6910                 if not self._complete_graph():
6911                         raise self._unknown_internal_error()
6912
6913                 if not self.validate_blockers():
6914                         raise self._unknown_internal_error()
6915
6916                 if self._slot_collision_info:
6917                         self._process_slot_conflicts()
6918
6919         def _serialize_tasks(self):
6920
6921                 if "--debug" in self.myopts:
6922                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6923                         self.digraph.debug_print()
6924                         writemsg("\n", noiselevel=-1)
6925
6926                 scheduler_graph = self.digraph.copy()
6927                 mygraph = self.digraph.copy()
6928                 # Prune "nomerge" root nodes if nothing depends on them, since
6929                 # otherwise they slow down merge order calculation. Don't remove
6930                 # non-root nodes since they help optimize merge order in some cases
6931                 # such as revdep-rebuild.
6932                 removed_nodes = set()
6933                 while True:
6934                         for node in mygraph.root_nodes():
6935                                 if not isinstance(node, Package) or \
6936                                         node.installed or node.onlydeps:
6937                                         removed_nodes.add(node)
6938                         if removed_nodes:
6939                                 self.spinner.update()
6940                                 mygraph.difference_update(removed_nodes)
6941                         if not removed_nodes:
6942                                 break
6943                         removed_nodes.clear()
6944                 self._merge_order_bias(mygraph)
6945                 def cmp_circular_bias(n1, n2):
6946                         """
6947                         RDEPEND is stronger than PDEPEND and this function
6948                         measures such a strength bias within a circular
6949                         dependency relationship.
6950                         """
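                             # Illustrative case (hypothetical circular pair, default
                             # priority ranges): if A has a hard RDEPEND on B while B
                             # only PDEPENDs on A, n1_n2_medium is True only for
                             # (n1, n2) == (A, B), so A sorts after B and B merges first.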
6951                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6952                                 ignore_priority=priority_range.ignore_medium_soft)
6953                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6954                                 ignore_priority=priority_range.ignore_medium_soft)
6955                         if n1_n2_medium == n2_n1_medium:
6956                                 return 0
6957                         elif n1_n2_medium:
6958                                 return 1
6959                         return -1
6960                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6961                 retlist = []
6962                 # Contains uninstall tasks that have been scheduled to
6963                 # occur after overlapping blockers have been installed.
6964                 scheduled_uninstalls = set()
6965                 # Contains any Uninstall tasks that have been ignored
6966                 # in order to avoid the circular deps code path. These
6967                 # correspond to blocker conflicts that could not be
6968                 # resolved.
6969                 ignored_uninstall_tasks = set()
6970                 have_uninstall_task = False
6971                 complete = "complete" in self.myparams
6972                 asap_nodes = []
6973
6974                 def get_nodes(**kwargs):
6975                         """
6976                         Returns leaf nodes excluding Uninstall instances
6977                         since those should be executed as late as possible.
6978                         """
6979                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6980                                 if isinstance(node, Package) and \
6981                                         (node.operation != "uninstall" or \
6982                                         node in scheduled_uninstalls)]
6983
6984                 # sys-apps/portage needs special treatment if ROOT="/"
6985                 running_root = self._running_root.root
6986                 from portage.const import PORTAGE_PACKAGE_ATOM
6987                 runtime_deps = InternalPackageSet(
6988                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6989                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6990                         PORTAGE_PACKAGE_ATOM)
6991                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6992                         PORTAGE_PACKAGE_ATOM)
6993
6994                 if running_portage:
6995                         running_portage = running_portage[0]
6996                 else:
6997                         running_portage = None
6998
6999                 if replacement_portage:
7000                         replacement_portage = replacement_portage[0]
7001                 else:
7002                         replacement_portage = None
7003
7004                 if replacement_portage == running_portage:
7005                         replacement_portage = None
7006
7007                 if replacement_portage is not None:
7008                         # update from running_portage to replacement_portage asap
7009                         asap_nodes.append(replacement_portage)
7010
7011                 if running_portage is not None:
7012                         try:
7013                                 portage_rdepend = self._select_atoms_highest_available(
7014                                         running_root, running_portage.metadata["RDEPEND"],
7015                                         myuse=running_portage.use.enabled,
7016                                         parent=running_portage, strict=False)
7017                         except portage.exception.InvalidDependString, e:
7018                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7019                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7020                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7021                                 del e
7022                                 portage_rdepend = []
7023                         runtime_deps.update(atom for atom in portage_rdepend \
7024                                 if not atom.startswith("!"))
7025
7026                 def gather_deps(ignore_priority, mergeable_nodes,
7027                         selected_nodes, node):
7028                         """
7029                         Recursively gather a group of nodes that RDEPEND on
7030                         each other. This ensures that they are merged as a group
7031                         and get their RDEPENDs satisfied as soon as possible.
7032                         """
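                             # Illustrative case (hypothetical nodes A and B, neither of
                             # them the portage replacement): if A and B RDEPEND on each
                             # other and both are mergeable, then
                             #
                             #     selected_nodes = set()
                             #     gather_deps(ignore_priority, mergeable_nodes,
                             #             selected_nodes, A)
                             #
                             # returns True with selected_nodes == set([A, B]), so the
                             # pair is emitted as a single merge group.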
7033                         if node in selected_nodes:
7034                                 return True
7035                         if node not in mergeable_nodes:
7036                                 return False
7037                         if node == replacement_portage and \
7038                                 mygraph.child_nodes(node,
7039                                 ignore_priority=priority_range.ignore_medium_soft):
7040                                 # Make sure that portage always has all of its
7041                                 # RDEPENDs installed first.
7042                                 return False
7043                         selected_nodes.add(node)
7044                         for child in mygraph.child_nodes(node,
7045                                 ignore_priority=ignore_priority):
7046                                 if not gather_deps(ignore_priority,
7047                                         mergeable_nodes, selected_nodes, child):
7048                                         return False
7049                         return True
7050
7051                 def ignore_uninst_or_med(priority):
7052                         if priority is BlockerDepPriority.instance:
7053                                 return True
7054                         return priority_range.ignore_medium(priority)
7055
7056                 def ignore_uninst_or_med_soft(priority):
7057                         if priority is BlockerDepPriority.instance:
7058                                 return True
7059                         return priority_range.ignore_medium_soft(priority)
7060
7061                 tree_mode = "--tree" in self.myopts
7062                 # Tracks whether or not the current iteration should prefer asap_nodes
7063                 # if available.  This is set to False when the previous iteration
7064                 # failed to select any nodes.  It is reset whenever nodes are
7065                 # successfully selected.
7066                 prefer_asap = True
7067
7068                 # Controls whether or not the current iteration should drop edges that
7069                 # are "satisfied" by installed packages, in order to solve circular
7070                 # dependencies. The deep runtime dependencies of installed packages are
7071                 # not checked in this case (bug #199856), so it must be avoided
7072                 # whenever possible.
7073                 drop_satisfied = False
7074
7075                 # State of variables for successive iterations that loosen the
7076                 # criteria for node selection.
7077                 #
7078                 # iteration   prefer_asap   drop_satisfied
7079                 # 1           True          False
7080                 # 2           False         False
7081                 # 3           False         True
7082                 #
7083                 # If no nodes are selected on the last iteration, it is due to
7084                 # unresolved blockers or circular dependencies.
7085
7086                 while not mygraph.empty():
7087                         self.spinner.update()
7088                         selected_nodes = None
7089                         ignore_priority = None
7090                         if drop_satisfied or (prefer_asap and asap_nodes):
7091                                 priority_range = DepPrioritySatisfiedRange
7092                         else:
7093                                 priority_range = DepPriorityNormalRange
7094                         if prefer_asap and asap_nodes:
7095                                 # ASAP nodes are merged before their soft deps. Go ahead and
7096                                 # select root nodes here if necessary, since it's typical for
7097                                 # the parent to have been removed from the graph already.
7098                                 asap_nodes = [node for node in asap_nodes \
7099                                         if mygraph.contains(node)]
7100                                 for node in asap_nodes:
7101                                         if not mygraph.child_nodes(node,
7102                                                 ignore_priority=priority_range.ignore_soft):
7103                                                 selected_nodes = [node]
7104                                                 asap_nodes.remove(node)
7105                                                 break
7106                         if not selected_nodes and \
7107                                 not (prefer_asap and asap_nodes):
7108                                 for i in xrange(priority_range.NONE,
7109                                         priority_range.MEDIUM_SOFT + 1):
7110                                         ignore_priority = priority_range.ignore_priority[i]
7111                                         nodes = get_nodes(ignore_priority=ignore_priority)
7112                                         if nodes:
7113                                                 # If there is a mix of uninstall nodes with other
7114                                                 # types, save the uninstall nodes for later since
7115                                                 # sometimes a merge node will render an uninstall
7116                                                 # node unnecessary (due to occupying the same slot),
7117                                                 # and we want to avoid executing a separate uninstall
7118                                                 # task in that case.
7119                                                 if len(nodes) > 1:
7120                                                         good_uninstalls = []
7121                                                         with_some_uninstalls_excluded = []
7122                                                         for node in nodes:
7123                                                                 if node.operation == "uninstall":
7124                                                                         slot_node = self.mydbapi[node.root
7125                                                                                 ].match_pkgs(node.slot_atom)
7126                                                                         if slot_node and \
7127                                                                                 slot_node[0].operation == "merge":
7128                                                                                 continue
7129                                                                         good_uninstalls.append(node)
7130                                                                 with_some_uninstalls_excluded.append(node)
7131                                                         if good_uninstalls:
7132                                                                 nodes = good_uninstalls
7133                                                         elif with_some_uninstalls_excluded:
7134                                                                 nodes = with_some_uninstalls_excluded
7135                                                         else:
7136                                                                 nodes = nodes
7137
7138                                                 if ignore_priority is None and not tree_mode:
7139                                                         # Greedily pop all of these nodes since no
7140                                                         # relationship has been ignored. This optimization
7141                                                         # destroys --tree output, so it's disabled in tree
7142                                                         # mode.
7143                                                         selected_nodes = nodes
7144                                                 else:
7145                                                         # For optimal merge order:
7146                                                         #  * Only pop one node.
7147                                                         #  * Removing a root node (node without a parent)
7148                                                         #    will not produce a leaf node, so avoid it.
7149                                                         #  * It's normal for a selected uninstall to be a
7150                                                         #    root node, so don't check them for parents.
7151                                                         for node in nodes:
7152                                                                 if node.operation == "uninstall" or \
7153                                                                         mygraph.parent_nodes(node):
7154                                                                         selected_nodes = [node]
7155                                                                         break
7156
7157                                                 if selected_nodes:
7158                                                         break
7159
7160                         if not selected_nodes:
7161                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7162                                 if nodes:
7163                                         mergeable_nodes = set(nodes)
7164                                         if prefer_asap and asap_nodes:
7165                                                 nodes = asap_nodes
7166                                         for i in xrange(priority_range.SOFT,
7167                                                 priority_range.MEDIUM_SOFT + 1):
7168                                                 ignore_priority = priority_range.ignore_priority[i]
7169                                                 for node in nodes:
7170                                                         if not mygraph.parent_nodes(node):
7171                                                                 continue
7172                                                         selected_nodes = set()
7173                                                         if gather_deps(ignore_priority,
7174                                                                 mergeable_nodes, selected_nodes, node):
7175                                                                 break
7176                                                         else:
7177                                                                 selected_nodes = None
7178                                                 if selected_nodes:
7179                                                         break
7180
7181                                         if prefer_asap and asap_nodes and not selected_nodes:
7182                                                 # We failed to find any asap nodes to merge, so ignore
7183                                                 # them for the next iteration.
7184                                                 prefer_asap = False
7185                                                 continue
7186
7187                         if selected_nodes and ignore_priority is not None:
7188                                 # Try to merge ignored medium_soft deps as soon as possible
7189                                 # if they're not satisfied by installed packages.
7190                                 for node in selected_nodes:
7191                                         children = set(mygraph.child_nodes(node))
7192                                         soft = children.difference(
7193                                                 mygraph.child_nodes(node,
7194                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7195                                         medium_soft = children.difference(
7196                                                 mygraph.child_nodes(node,
7197                                                         ignore_priority = \
7198                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7199                                         medium_soft.difference_update(soft)
7200                                         for child in medium_soft:
7201                                                 if child in selected_nodes:
7202                                                         continue
7203                                                 if child in asap_nodes:
7204                                                         continue
7205                                                 asap_nodes.append(child)
7206
7207                         if selected_nodes and len(selected_nodes) > 1:
7208                                 if not isinstance(selected_nodes, list):
7209                                         selected_nodes = list(selected_nodes)
7210                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7211
7212                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7213                                 # An Uninstall task needs to be executed in order to
7214                                 # avoid conflict if possible.
7215
7216                                 if drop_satisfied:
7217                                         priority_range = DepPrioritySatisfiedRange
7218                                 else:
7219                                         priority_range = DepPriorityNormalRange
7220
7221                                 mergeable_nodes = get_nodes(
7222                                         ignore_priority=ignore_uninst_or_med)
7223
7224                                 min_parent_deps = None
7225                                 uninst_task = None
7226                                 for task in myblocker_uninstalls.leaf_nodes():
7227                                         # Do some sanity checks so that system or world packages
7228                                         # don't get uninstalled inappropriately here (only really
7229                                         # necessary when --complete-graph has not been enabled).
7230
7231                                         if task in ignored_uninstall_tasks:
7232                                                 continue
7233
7234                                         if task in scheduled_uninstalls:
7235                                                 # It's been scheduled but it hasn't
7236                                                 # been executed yet due to dependence
7237                                                 # on installation of blocking packages.
7238                                                 continue
7239
7240                                         root_config = self.roots[task.root]
7241                                         inst_pkg = self._pkg_cache[
7242                                                 ("installed", task.root, task.cpv, "nomerge")]
7243
7244                                         if self.digraph.contains(inst_pkg):
7245                                                 continue
7246
7247                                         forbid_overlap = False
7248                                         heuristic_overlap = False
7249                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7250                                                 if blocker.eapi in ("0", "1"):
7251                                                         heuristic_overlap = True
7252                                                 elif blocker.atom.blocker.overlap.forbid:
7253                                                         forbid_overlap = True
7254                                                         break
7255                                         if forbid_overlap and running_root == task.root:
7256                                                 continue
7257
7258                                         if heuristic_overlap and running_root == task.root:
7259                                                 # Never uninstall sys-apps/portage or its essential
7260                                                 # dependencies, except through replacement.
7261                                                 try:
7262                                                         runtime_dep_atoms = \
7263                                                                 list(runtime_deps.iterAtomsForPackage(task))
7264                                                 except portage.exception.InvalidDependString, e:
7265                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7266                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7267                                                                 (task.root, task.cpv, e), noiselevel=-1)
7268                                                         del e
7269                                                         continue
7270
7271                                                 # Don't uninstall a runtime dep if it appears
7272                                                 # to be the only suitable one installed.
7273                                                 skip = False
7274                                                 vardb = root_config.trees["vartree"].dbapi
7275                                                 for atom in runtime_dep_atoms:
7276                                                         other_version = None
7277                                                         for pkg in vardb.match_pkgs(atom):
7278                                                                 if pkg.cpv == task.cpv and \
7279                                                                         pkg.metadata["COUNTER"] == \
7280                                                                         task.metadata["COUNTER"]:
7281                                                                         continue
7282                                                                 other_version = pkg
7283                                                                 break
7284                                                         if other_version is None:
7285                                                                 skip = True
7286                                                                 break
7287                                                 if skip:
7288                                                         continue
7289
7290                                                 # For packages in the system set, don't take
7291                                                 # any chances. If the conflict can't be resolved
7292                                                 # by a normal replacement operation then abort.
7293                                                 skip = False
7294                                                 try:
7295                                                         for atom in root_config.sets[
7296                                                                 "system"].iterAtomsForPackage(task):
7297                                                                 skip = True
7298                                                                 break
7299                                                 except portage.exception.InvalidDependString, e:
7300                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7301                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7302                                                                 (task.root, task.cpv, e), noiselevel=-1)
7303                                                         del e
7304                                                         skip = True
7305                                                 if skip:
7306                                                         continue
7307
7308                                         # Note that the world check isn't always
7309                                         # necessary since self._complete_graph() will
7310                                         # add all packages from the system and world sets to the
7311                                         # graph. This just allows unresolved conflicts to be
7312                                         # detected as early as possible, which makes it possible
7313                                         # to avoid calling self._complete_graph() when it is
7314                                         # unnecessary due to blockers triggering an abort.
7315                                         if not complete:
7316                                 # For packages in the world set, go ahead and uninstall
7317                                                 # when necessary, as long as the atom will be satisfied
7318                                                 # in the final state.
7319                                                 graph_db = self.mydbapi[task.root]
7320                                                 skip = False
7321                                                 try:
7322                                                         for atom in root_config.sets[
7323                                                                 "world"].iterAtomsForPackage(task):
7324                                                                 satisfied = False
7325                                                                 for pkg in graph_db.match_pkgs(atom):
7326                                                                         if pkg == inst_pkg:
7327                                                                                 continue
7328                                                                         satisfied = True
7329                                                                         break
7330                                                                 if not satisfied:
7331                                                                         skip = True
7332                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7333                                                                         break
7334                                                 except portage.exception.InvalidDependString, e:
7335                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7336                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7337                                                                 (task.root, task.cpv, e), noiselevel=-1)
7338                                                         del e
7339                                                         skip = True
7340                                                 if skip:
7341                                                         continue
7342
7343                                         # Check the deps of parent nodes to ensure that
7344                                         # the chosen task produces a leaf node. Maybe
7345                                         # this can be optimized some more to make the
7346                                         # best possible choice, but the current algorithm
7347                                         # is simple and should be near optimal for most
7348                                         # common cases.
7349                                         mergeable_parent = False
7350                                         parent_deps = set()
7351                                         for parent in mygraph.parent_nodes(task):
7352                                                 parent_deps.update(mygraph.child_nodes(parent,
7353                                                         ignore_priority=priority_range.ignore_medium_soft))
7354                                                 if parent in mergeable_nodes and \
7355                                                         gather_deps(ignore_uninst_or_med_soft,
7356                                                         mergeable_nodes, set(), parent):
7357                                                         mergeable_parent = True
7358
7359                                         if not mergeable_parent:
7360                                                 continue
7361
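                                             # Among the candidate uninstall tasks, prefer the one whose
                                             # parents have the fewest other remaining children, so that
                                             # a parent is as close as possible to becoming a leaf node
                                             # once the uninstall is done.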
7362                                         parent_deps.remove(task)
7363                                         if min_parent_deps is None or \
7364                                                 len(parent_deps) < min_parent_deps:
7365                                                 min_parent_deps = len(parent_deps)
7366                                                 uninst_task = task
7367
7368                                 if uninst_task is not None:
7369                                         # The uninstall is performed only after blocking
7370                                         # packages have been merged on top of it. File
7371                                         # collisions between blocking packages are detected
7372                                         # and removed from the list of files to be uninstalled.
7373                                         scheduled_uninstalls.add(uninst_task)
7374                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7375
7376                                         # Reverse the parent -> uninstall edges since we want
7377                                         # to do the uninstall after blocking packages have
7378                                         # been merged on top of it.
7379                                         mygraph.remove(uninst_task)
7380                                         for blocked_pkg in parent_nodes:
7381                                                 mygraph.add(blocked_pkg, uninst_task,
7382                                                         priority=BlockerDepPriority.instance)
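                                                             # Mirror the reversed edge in scheduler_graph too,
                                                             # since that graph is returned alongside retlist at
                                                             # the end of this method and must agree with the
                                                             # merge order computed here.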
7383                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7384                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7385                                                         priority=BlockerDepPriority.instance)
7386
7387                                         # Reset the state variables for leaf node selection and
7388                                         # continue trying to select leaf nodes.
7389                                         prefer_asap = True
7390                                         drop_satisfied = False
7391                                         continue
7392
7393                         if not selected_nodes:
7394                                 # Only select root nodes as a last resort. This case should
7395                                 # only trigger when the graph is nearly empty and the only
7396                                 # remaining nodes are isolated (no parents or children). Since
7397                                 # the nodes must be isolated, ignore_priority is not needed.
7398                                 selected_nodes = get_nodes()
7399
7400                         if not selected_nodes and not drop_satisfied:
7401                                 drop_satisfied = True
7402                                 continue
7403
7404                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7405                                 # If possible, drop an uninstall task here in order to avoid
7406                                 # the circular deps code path. The corresponding blocker will
7407                                 # still be counted as an unresolved conflict.
7408                                 uninst_task = None
7409                                 for node in myblocker_uninstalls.leaf_nodes():
7410                                         try:
7411                                                 mygraph.remove(node)
7412                                         except KeyError:
7413                                                 pass
7414                                         else:
7415                                                 uninst_task = node
7416                                                 ignored_uninstall_tasks.add(node)
7417                                                 break
7418
7419                                 if uninst_task is not None:
7420                                         # Reset the state variables for leaf node selection and
7421                                         # continue trying to select leaf nodes.
7422                                         prefer_asap = True
7423                                         drop_satisfied = False
7424                                         continue
7425
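                             # Nothing could be selected even after dropping satisfied deps
                             # and ignoring an uninstall task, so the remaining nodes form a
                             # genuine dependency cycle. Save the graph for display and bail out.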
7426                         if not selected_nodes:
7427                                 self._circular_deps_for_display = mygraph
7428                                 raise self._unknown_internal_error()
7429
7430                         # At this point, we've succeeded in selecting one or more nodes, so
7431                         # reset state variables for leaf node selection.
7432                         prefer_asap = True
7433                         drop_satisfied = False
7434
7435                         mygraph.difference_update(selected_nodes)
7436
7437                         for node in selected_nodes:
7438                                 if isinstance(node, Package) and \
7439                                         node.operation == "nomerge":
7440                                         continue
7441
7442                                 # Handle interactions between blockers
7443                                 # and uninstallation tasks.
7444                                 solved_blockers = set()
7445                                 uninst_task = None
7446                                 if isinstance(node, Package) and \
7447                                         "uninstall" == node.operation:
7448                                         have_uninstall_task = True
7449                                         uninst_task = node
7450                                 else:
7451                                         vardb = self.trees[node.root]["vartree"].dbapi
7452                                         previous_cpv = vardb.match(node.slot_atom)
7453                                         if previous_cpv:
7454                                                 # The package will be replaced by this one, so remove
7455                                                 # the corresponding Uninstall task if necessary.
7456                                                 previous_cpv = previous_cpv[0]
7457                                                 uninst_task = \
7458                                                         ("installed", node.root, previous_cpv, "uninstall")
7459                                                 try:
7460                                                         mygraph.remove(uninst_task)
7461                                                 except KeyError:
7462                                                         pass
7463
7464                                 if uninst_task is not None and \
7465                                         uninst_task not in ignored_uninstall_tasks and \
7466                                         myblocker_uninstalls.contains(uninst_task):
7467                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7468                                         myblocker_uninstalls.remove(uninst_task)
7469                                         # Discard any blockers that this Uninstall solves.
7470                                         for blocker in blocker_nodes:
7471                                                 if not myblocker_uninstalls.child_nodes(blocker):
7472                                                         myblocker_uninstalls.remove(blocker)
7473                                                         solved_blockers.add(blocker)
7474
7475                                 retlist.append(node)
7476
7477                                 if (isinstance(node, Package) and \
7478                                         "uninstall" == node.operation) or \
7479                                         (uninst_task is not None and \
7480                                         uninst_task in scheduled_uninstalls):
7481                                         # Include satisfied blockers in the merge list
7482                                         # since the user might be interested, and because
7483                                         # it indicates that blocking packages will temporarily
7484                                         # be installed simultaneously.
7485                                         for blocker in solved_blockers:
7486                                                 retlist.append(Blocker(atom=blocker.atom,
7487                                                         root=blocker.root, eapi=blocker.eapi,
7488                                                         satisfied=True))
7489
7490                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7491                 for node in myblocker_uninstalls.root_nodes():
7492                         unsolvable_blockers.add(node)
7493
7494                 for blocker in unsolvable_blockers:
7495                         retlist.append(blocker)
7496
7497                 # If any Uninstall tasks need to be executed in order
7498                 # to avoid a conflict, complete the graph with any
7499                 # dependencies that may have been initially
7500                 # neglected (to ensure that unsafe Uninstall tasks
7501                 # are properly identified and blocked from execution).
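                 # The retry exception raised below is expected to be caught by
                 # the caller, which rebuilds the graph with the "complete"
                 # parameter and serializes the tasks again.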
7502                 if have_uninstall_task and \
7503                         not complete and \
7504                         not unsolvable_blockers:
7505                         self.myparams.add("complete")
7506                         raise self._serialize_tasks_retry("")
7507
7508                 if unsolvable_blockers and \
7509                         not self._accept_blocker_conflicts():
7510                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7511                         self._serialized_tasks_cache = retlist[:]
7512                         self._scheduler_graph = scheduler_graph
7513                         raise self._unknown_internal_error()
7514
7515                 if self._slot_collision_info and \
7516                         not self._accept_blocker_conflicts():
7517                         self._serialized_tasks_cache = retlist[:]
7518                         self._scheduler_graph = scheduler_graph
7519                         raise self._unknown_internal_error()
7520
7521                 return retlist, scheduler_graph
7522
7523         def _show_circular_deps(self, mygraph):
7524                 # No leaf nodes are available, so we have a circular
7525                 # dependency panic situation.  Reduce the noise level to a
7526                 # minimum via repeated elimination of root nodes since they
7527                 # have no parents and thus cannot be part of a cycle.
7528                 while True:
7529                         root_nodes = mygraph.root_nodes(
7530                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7531                         if not root_nodes:
7532                                 break
7533                         mygraph.difference_update(root_nodes)
7534                 # Display the USE flags that are enabled on nodes that are part
7535                 # of dependency cycles in case that helps the user decide to
7536                 # disable some of them.
7537                 display_order = []
7538                 tempgraph = mygraph.copy()
7539                 while not tempgraph.empty():
7540                         nodes = tempgraph.leaf_nodes()
7541                         if not nodes:
7542                                 node = tempgraph.order[0]
7543                         else:
7544                                 node = nodes[0]
7545                         display_order.append(node)
7546                         tempgraph.remove(node)
7547                 display_order.reverse()
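                 # display_order was built leaves-first, so after reversing it
                 # dependent packages come before the packages they depend on,
                 # which is the order the --tree display enabled below expects.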
7548                 self.myopts.pop("--quiet", None)
7549                 self.myopts.pop("--verbose", None)
7550                 self.myopts["--tree"] = True
7551                 portage.writemsg("\n\n", noiselevel=-1)
7552                 self.display(display_order)
7553                 prefix = colorize("BAD", " * ")
7554                 portage.writemsg("\n", noiselevel=-1)
7555                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7556                         noiselevel=-1)
7557                 portage.writemsg("\n", noiselevel=-1)
7558                 mygraph.debug_print()
7559                 portage.writemsg("\n", noiselevel=-1)
7560                 portage.writemsg(prefix + "Note that circular dependencies " + \
7561                         "can often be avoided by temporarily\n", noiselevel=-1)
7562                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7563                         "optional dependencies.\n", noiselevel=-1)
7564
7565         def _show_merge_list(self):
7566                 if self._serialized_tasks_cache is not None and \
7567                         not (self._displayed_list and \
7568                         (self._displayed_list == self._serialized_tasks_cache or \
7569                         self._displayed_list == \
7570                                 list(reversed(self._serialized_tasks_cache)))):
7571                         display_list = self._serialized_tasks_cache[:]
7572                         if "--tree" in self.myopts:
7573                                 display_list.reverse()
7574                         self.display(display_list)
7575
7576         def _show_unsatisfied_blockers(self, blockers):
7577                 self._show_merge_list()
7578                 msg = "Error: The above package list contains " + \
7579                         "packages which cannot be installed " + \
7580                         "at the same time on the same system."
7581                 prefix = colorize("BAD", " * ")
7582                 from textwrap import wrap
7583                 portage.writemsg("\n", noiselevel=-1)
7584                 for line in wrap(msg, 70):
7585                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7586
7587                 # Display the conflicting packages along with the packages
7588                 # that pulled them in. This is helpful for troubleshooting
7589                 # cases in which blockers don't solve automatically and
7590                 # the reasons are not apparent from the normal merge list
7591                 # display.
7592
7593                 conflict_pkgs = {}
7594                 for blocker in blockers:
7595                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7596                                 self._blocker_parents.parent_nodes(blocker)):
7597                                 parent_atoms = self._parent_atoms.get(pkg)
7598                                 if not parent_atoms:
7599                                         atom = self._blocked_world_pkgs.get(pkg)
7600                                         if atom is not None:
7601                                                 parent_atoms = set([("@world", atom)])
7602                                 if parent_atoms:
7603                                         conflict_pkgs[pkg] = parent_atoms
7604
7605                 if conflict_pkgs:
7606                         # Reduce noise by pruning packages that are only
7607                         # pulled in by other conflict packages.
7608                         pruned_pkgs = set()
7609                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7610                                 relevant_parent = False
7611                                 for parent, atom in parent_atoms:
7612                                         if parent not in conflict_pkgs:
7613                                                 relevant_parent = True
7614                                                 break
7615                                 if not relevant_parent:
7616                                         pruned_pkgs.add(pkg)
7617                         for pkg in pruned_pkgs:
7618                                 del conflict_pkgs[pkg]
7619
7620                 if conflict_pkgs:
7621                         msg = []
7622                         msg.append("\n")
7623                         indent = "  "
7624                         # Max number of parents shown, to avoid flooding the display.
7625                         max_parents = 3
7626                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7627
7628                                 pruned_list = set()
7629
7630                                 # Prefer packages that are not directly involved in a conflict.
7631                                 for parent_atom in parent_atoms:
7632                                         if len(pruned_list) >= max_parents:
7633                                                 break
7634                                         parent, atom = parent_atom
7635                                         if parent not in conflict_pkgs:
7636                                                 pruned_list.add(parent_atom)
7637
7638                                 for parent_atom in parent_atoms:
7639                                         if len(pruned_list) >= max_parents:
7640                                                 break
7641                                         pruned_list.add(parent_atom)
7642
7643                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7644                                 msg.append(indent + "%s pulled in by\n" % pkg)
7645
7646                                 for parent_atom in pruned_list:
7647                                         parent, atom = parent_atom
7648                                         msg.append(2*indent)
7649                                         if isinstance(parent,
7650                                                 (PackageArg, AtomArg)):
7651                                                 # For PackageArg and AtomArg types, it's
7652                                                 # redundant to display the atom attribute.
7653                                                 msg.append(str(parent))
7654                                         else:
7655                                                 # Display the specific atom from SetArg or
7656                                                 # Package types.
7657                                                 msg.append("%s required by %s" % (atom, parent))
7658                                         msg.append("\n")
7659
7660                                 if omitted_parents:
7661                                         msg.append(2*indent)
7662                                         msg.append("(and %d more)\n" % omitted_parents)
7663
7664                                 msg.append("\n")
7665
7666                         sys.stderr.write("".join(msg))
7667                         sys.stderr.flush()
7668
7669                 if "--quiet" not in self.myopts:
7670                         show_blocker_docs_link()
7671
7672         def display(self, mylist, favorites=[], verbosity=None):
7673
7674                 # This is used to prevent display_problems() from
7675                 # redundantly displaying this exact same merge list
7676                 # again via _show_merge_list().
7677                 self._displayed_list = mylist
7678
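                 # Map the options to a verbosity level:
                 # --quiet -> 1, default -> 2, --verbose -> 3.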
7679                 if verbosity is None:
7680                         verbosity = ("--quiet" in self.myopts and 1 or \
7681                                 "--verbose" in self.myopts and 3 or 2)
7682                 favorites_set = InternalPackageSet(favorites)
7683                 oneshot = "--oneshot" in self.myopts or \
7684                         "--onlydeps" in self.myopts
7685                 columns = "--columns" in self.myopts
7686                 changelogs=[]
7687                 p=[]
7688                 blockers = []
7689
7690                 counters = PackageCounters()
7691
7692                 if verbosity == 1 and "--verbose" not in self.myopts:
7693                         def create_use_string(*args):
7694                                 return ""
7695                 else:
7696                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7697                                 old_iuse, old_use,
7698                                 is_new, reinst_flags,
7699                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7700                                 alphabetical=("--alphabetical" in self.myopts)):
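                                 # Notation used below: "*" marks a flag whose state changed
                                 # relative to the installed version, "%" marks a flag that
                                 # was added to or removed from IUSE, and parentheses mark
                                 # flags that are forced/masked or no longer in IUSE.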
7701                                 enabled = []
7702                                 if alphabetical:
7703                                         disabled = enabled
7704                                         removed = enabled
7705                                 else:
7706                                         disabled = []
7707                                         removed = []
7708                                 cur_iuse = set(cur_iuse)
7709                                 enabled_flags = cur_iuse.intersection(cur_use)
7710                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7711                                 any_iuse = cur_iuse.union(old_iuse)
7712                                 any_iuse = list(any_iuse)
7713                                 any_iuse.sort()
7714                                 for flag in any_iuse:
7715                                         flag_str = None
7716                                         isEnabled = False
7717                                         reinst_flag = reinst_flags and flag in reinst_flags
7718                                         if flag in enabled_flags:
7719                                                 isEnabled = True
7720                                                 if is_new or flag in old_use and \
7721                                                         (all_flags or reinst_flag):
7722                                                         flag_str = red(flag)
7723                                                 elif flag not in old_iuse:
7724                                                         flag_str = yellow(flag) + "%*"
7725                                                 elif flag not in old_use:
7726                                                         flag_str = green(flag) + "*"
7727                                         elif flag in removed_iuse:
7728                                                 if all_flags or reinst_flag:
7729                                                         flag_str = yellow("-" + flag) + "%"
7730                                                         if flag in old_use:
7731                                                                 flag_str += "*"
7732                                                         flag_str = "(" + flag_str + ")"
7733                                                         removed.append(flag_str)
7734                                                 continue
7735                                         else:
7736                                                 if is_new or flag in old_iuse and \
7737                                                         flag not in old_use and \
7738                                                         (all_flags or reinst_flag):
7739                                                         flag_str = blue("-" + flag)
7740                                                 elif flag not in old_iuse:
7741                                                         flag_str = yellow("-" + flag)
7742                                                         if flag not in iuse_forced:
7743                                                                 flag_str += "%"
7744                                                 elif flag in old_use:
7745                                                         flag_str = green("-" + flag) + "*"
7746                                         if flag_str:
7747                                                 if flag in iuse_forced:
7748                                                         flag_str = "(" + flag_str + ")"
7749                                                 if isEnabled:
7750                                                         enabled.append(flag_str)
7751                                                 else:
7752                                                         disabled.append(flag_str)
7753
7754                                 if alphabetical:
7755                                         ret = " ".join(enabled)
7756                                 else:
7757                                         ret = " ".join(enabled + disabled + removed)
7758                                 if ret:
7759                                         ret = '%s="%s" ' % (name, ret)
7760                                 return ret
7761
7762                 repo_display = RepoDisplay(self.roots)
7763
7764                 tree_nodes = []
7765                 display_list = []
7766                 mygraph = self.digraph.copy()
7767
7768                 # If there are any Uninstall instances, add the corresponding
7769                 # blockers to the digraph (useful for --tree display).
7770
7771                 executed_uninstalls = set(node for node in mylist \
7772                         if isinstance(node, Package) and node.operation == "unmerge")
7773
7774                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7775                         uninstall_parents = \
7776                                 self._blocker_uninstalls.parent_nodes(uninstall)
7777                         if not uninstall_parents:
7778                                 continue
7779
7780                         # Remove the corresponding "nomerge" node and substitute
7781                         # the Uninstall node.
7782                         inst_pkg = self._pkg_cache[
7783                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7784                         try:
7785                                 mygraph.remove(inst_pkg)
7786                         except KeyError:
7787                                 pass
7788
7789                         try:
7790                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7791                         except KeyError:
7792                                 inst_pkg_blockers = []
7793
7794                         # Break the Package -> Uninstall edges.
7795                         mygraph.remove(uninstall)
7796
7797                         # Resolution of a package's blockers
7798                         # depends on its own uninstallation.
7799                         for blocker in inst_pkg_blockers:
7800                                 mygraph.add(uninstall, blocker)
7801
7802                         # Expand Package -> Uninstall edges into
7803                         # Package -> Blocker -> Uninstall edges.
7804                         for blocker in uninstall_parents:
7805                                 mygraph.add(uninstall, blocker)
7806                                 for parent in self._blocker_parents.parent_nodes(blocker):
7807                                         if parent != inst_pkg:
7808                                                 mygraph.add(blocker, parent)
7809
7810                         # If the uninstall task did not need to be executed because
7811                         # of an upgrade, display Blocker -> Upgrade edges since the
7812                         # corresponding Blocker -> Uninstall edges will not be shown.
7813                         upgrade_node = \
7814                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7815                         if upgrade_node is not None and \
7816                                 uninstall not in executed_uninstalls:
7817                                 for blocker in uninstall_parents:
7818                                         mygraph.add(upgrade_node, blocker)
7819
7820                 unsatisfied_blockers = []
7821                 i = 0
7822                 depth = 0
7823                 shown_edges = set()
7824                 for x in mylist:
7825                         if isinstance(x, Blocker) and not x.satisfied:
7826                                 unsatisfied_blockers.append(x)
7827                                 continue
7828                         graph_key = x
7829                         if "--tree" in self.myopts:
7830                                 depth = len(tree_nodes)
7831                                 while depth and graph_key not in \
7832                                         mygraph.child_nodes(tree_nodes[depth-1]):
7833                                                 depth -= 1
7834                                 if depth:
7835                                         tree_nodes = tree_nodes[:depth]
7836                                         tree_nodes.append(graph_key)
7837                                         display_list.append((x, depth, True))
7838                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7839                                 else:
7840                                         traversed_nodes = set() # prevent endless cycles
7841                                         traversed_nodes.add(graph_key)
7842                                         def add_parents(current_node, ordered):
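                                                     # Walk up the graph recursively, picking one parent per
                                                     # node that has not been displayed yet (preferring
                                                     # parents that do not create a direct cycle), so that
                                                     # this package is printed beneath a chain of the
                                                     # packages that pulled it in.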
7843                                                 parent_nodes = None
7844                                                 # Do not traverse to parents if this node is an
7845                                                 # argument or a direct member of a set that has
7846                                                 # been specified as an argument (system or world).
7847                                                 if current_node not in self._set_nodes:
7848                                                         parent_nodes = mygraph.parent_nodes(current_node)
7849                                                 if parent_nodes:
7850                                                         child_nodes = set(mygraph.child_nodes(current_node))
7851                                                         selected_parent = None
7852                                                         # First, try to avoid a direct cycle.
7853                                                         for node in parent_nodes:
7854                                                                 if not isinstance(node, (Blocker, Package)):
7855                                                                         continue
7856                                                                 if node not in traversed_nodes and \
7857                                                                         node not in child_nodes:
7858                                                                         edge = (current_node, node)
7859                                                                         if edge in shown_edges:
7860                                                                                 continue
7861                                                                         selected_parent = node
7862                                                                         break
7863                                                         if not selected_parent:
7864                                                                 # A direct cycle is unavoidable.
7865                                                                 for node in parent_nodes:
7866                                                                         if not isinstance(node, (Blocker, Package)):
7867                                                                                 continue
7868                                                                         if node not in traversed_nodes:
7869                                                                                 edge = (current_node, node)
7870                                                                                 if edge in shown_edges:
7871                                                                                         continue
7872                                                                                 selected_parent = node
7873                                                                                 break
7874                                                         if selected_parent:
7875                                                                 shown_edges.add((current_node, selected_parent))
7876                                                                 traversed_nodes.add(selected_parent)
7877                                                                 add_parents(selected_parent, False)
7878                                                 display_list.append((current_node,
7879                                                         len(tree_nodes), ordered))
7880                                                 tree_nodes.append(current_node)
7881                                         tree_nodes = []
7882                                         add_parents(graph_key, True)
7883                         else:
7884                                 display_list.append((x, depth, True))
7885                 mylist = display_list
7886                 for x in unsatisfied_blockers:
7887                         mylist.append((x, 0, True))
7888
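                 # Walk the flattened tree backwards, dropping consecutive
                 # duplicates as well as context-only entries (unordered or
                 # "nomerge" nodes) that do not head a deeper subtree
                 # containing something to merge.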
7889                 last_merge_depth = 0
7890                 for i in xrange(len(mylist)-1,-1,-1):
7891                         graph_key, depth, ordered = mylist[i]
7892                         if not ordered and depth == 0 and i > 0 \
7893                                 and graph_key == mylist[i-1][0] and \
7894                                 mylist[i-1][1] == 0:
7895                                 # An ordered node got a consecutive duplicate when the tree was
7896                                 # being filled in.
7897                                 del mylist[i]
7898                                 continue
7899                         if ordered and graph_key[-1] != "nomerge":
7900                                 last_merge_depth = depth
7901                                 continue
7902                         if depth >= last_merge_depth or \
7903                                 i < len(mylist) - 1 and \
7904                                 depth >= mylist[i+1][1]:
7905                                         del mylist[i]
7906
7907                 from portage import flatten
7908                 from portage.dep import use_reduce, paren_reduce
7909                         # List of files to fetch; avoids counting the same file twice
7910                         # in the size display (verbose mode).
7911                 myfetchlist=[]
7912
7913                 # Use this set to detect when all the "repoadd" strings are "[0]"
7914                 # and disable the entire repo display in this case.
7915                 repoadd_set = set()
7916
7917                 for mylist_index in xrange(len(mylist)):
7918                         x, depth, ordered = mylist[mylist_index]
7919                         pkg_type = x[0]
7920                         myroot = x[1]
7921                         pkg_key = x[2]
7922                         portdb = self.trees[myroot]["porttree"].dbapi
7923                         bindb  = self.trees[myroot]["bintree"].dbapi
7924                         vardb = self.trees[myroot]["vartree"].dbapi
7925                         vartree = self.trees[myroot]["vartree"]
7926                         pkgsettings = self.pkgsettings[myroot]
7927
7928                         fetch=" "
7929                         indent = " " * depth
7930
7931                         if isinstance(x, Blocker):
7932                                 if x.satisfied:
7933                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7934                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7935                                 else:
7936                                         blocker_style = "PKG_BLOCKER"
7937                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7938                                 if ordered:
7939                                         counters.blocks += 1
7940                                         if x.satisfied:
7941                                                 counters.blocks_satisfied += 1
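                                 # Expand the blocker atom to a full category/package key,
                                 # using the installed-package db to resolve the category
                                 # when the atom does not include one.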
7942                                 resolved = portage.key_expand(
7943                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7944                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7945                                         addl += " " + colorize(blocker_style, resolved)
7946                                 else:
7947                                         addl = "[%s %s] %s%s" % \
7948                                                 (colorize(blocker_style, "blocks"),
7949                                                 addl, indent, colorize(blocker_style, resolved))
7950                                 block_parents = self._blocker_parents.parent_nodes(x)
7951                                 block_parents = set([pnode[2] for pnode in block_parents])
7952                                 block_parents = ", ".join(block_parents)
7953                                 if resolved!=x[2]:
7954                                         addl += colorize(blocker_style,
7955                                                 " (\"%s\" is blocking %s)") % \
7956                                                 (str(x.atom).lstrip("!"), block_parents)
7957                                 else:
7958                                         addl += colorize(blocker_style,
7959                                                 " (is blocking %s)") % block_parents
7960                                 if isinstance(x, Blocker) and x.satisfied:
7961                                         if columns:
7962                                                 continue
7963                                         p.append(addl)
7964                                 else:
7965                                         blockers.append(addl)
7966                         else:
7967                                 pkg_status = x[3]
7968                                 pkg_merge = ordered and pkg_status == "merge"
7969                                 if not pkg_merge and pkg_status == "merge":
7970                                         pkg_status = "nomerge"
7971                                 built = pkg_type != "ebuild"
7972                                 installed = pkg_type == "installed"
7973                                 pkg = x
7974                                 metadata = pkg.metadata
7975                                 ebuild_path = None
7976                                 repo_name = metadata["repository"]
7977                                 if pkg_type == "ebuild":
7978                                         ebuild_path = portdb.findname(pkg_key)
7979                                         if not ebuild_path: # shouldn't happen
7980                                                 raise portage.exception.PackageNotFound(pkg_key)
7981                                         repo_path_real = os.path.dirname(os.path.dirname(
7982                                                 os.path.dirname(ebuild_path)))
7983                                 else:
7984                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7985                                 pkg_use = list(pkg.use.enabled)
7986                                 try:
7987                                         restrict = flatten(use_reduce(paren_reduce(
7988                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7989                                 except portage.exception.InvalidDependString, e:
7990                                         if not pkg.installed:
7991                                                 show_invalid_depstring_notice(x,
7992                                                         pkg.metadata["RESTRICT"], str(e))
7993                                                 del e
7994                                                 return 1
7995                                         restrict = []
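                                 # Fetch column: "F" marks a fetch-restricted ebuild whose
                                 # distfiles still have to be fetched manually, while "f"
                                 # means the required distfiles are already present.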
7996                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7997                                         "fetch" in restrict:
7998                                         fetch = red("F")
7999                                         if ordered:
8000                                                 counters.restrict_fetch += 1
8001                                         if portdb.fetch_check(pkg_key, pkg_use):
8002                                                 fetch = green("f")
8003                                                 if ordered:
8004                                                         counters.restrict_fetch_satisfied += 1
8005
8006                                 # We need to test for "--emptytree" here rather than for the "empty" param, because the
8007                                 # "empty" param is used for -u, where you still *do* want to see when something is being upgraded.
8008                                 myoldbest = []
8009                                 myinslotlist = None
8010                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8011                                 if vardb.cpv_exists(pkg_key):
8012                                         addl="  "+yellow("R")+fetch+"  "
8013                                         if ordered:
8014                                                 if pkg_merge:
8015                                                         counters.reinst += 1
8016                                                 elif pkg_status == "uninstall":
8017                                                         counters.uninst += 1
8018                                 # filter out old-style virtual matches
8019                                 elif installed_versions and \
8020                                         portage.cpv_getkey(installed_versions[0]) == \
8021                                         portage.cpv_getkey(pkg_key):
8022                                         myinslotlist = vardb.match(pkg.slot_atom)
8023                                         # If this is the first install of a new-style virtual, we
8024                                         # need to filter out old-style virtual matches.
8025                                         if myinslotlist and \
8026                                                 portage.cpv_getkey(myinslotlist[0]) != \
8027                                                 portage.cpv_getkey(pkg_key):
8028                                                 myinslotlist = None
8029                                         if myinslotlist:
8030                                                 myoldbest = myinslotlist[:]
8031                                                 addl = "   " + fetch
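                                                     # portage.best() returns the highest version, so if this
                                                     # package is not the best among itself and the installed
                                                     # packages in the same slot, it is a downgrade.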
8032                                                 if not portage.dep.cpvequal(pkg_key,
8033                                                         portage.best([pkg_key] + myoldbest)):
8034                                                         # Downgrade in slot
8035                                                         addl += turquoise("U")+blue("D")
8036                                                         if ordered:
8037                                                                 counters.downgrades += 1
8038                                                 else:
8039                                                         # Update in slot
8040                                                         addl += turquoise("U") + " "
8041                                                         if ordered:
8042                                                                 counters.upgrades += 1
8043                                         else:
8044                                                 # New slot, mark it new.
8045                                                 addl = " " + green("NS") + fetch + "  "
8046                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8047                                                 if ordered:
8048                                                         counters.newslot += 1
8049
8050                                         if "--changelog" in self.myopts:
8051                                                 inst_matches = vardb.match(pkg.slot_atom)
8052                                                 if inst_matches:
8053                                                         changelogs.extend(self.calc_changelog(
8054                                                                 portdb.findname(pkg_key),
8055                                                                 inst_matches[0], pkg_key))
8056                                 else:
8057                                         addl = " " + green("N") + " " + fetch + "  "
8058                                         if ordered:
8059                                                 counters.new += 1
8060
8061                                 verboseadd = ""
8062                                 repoadd = None
8063
8064                                 if True:
8065                                         # USE flag display
8066                                         forced_flags = set()
8067                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8068                                         forced_flags.update(pkgsettings.useforce)
8069                                         forced_flags.update(pkgsettings.usemask)
8070
8071                                         cur_use = [flag for flag in pkg.use.enabled \
8072                                                 if flag in pkg.iuse.all]
8073                                         cur_iuse = sorted(pkg.iuse.all)
8074
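                                         # Diff USE flags against the installed instance in the same
                                         # slot when one exists, otherwise against the same cpv (a
                                         # reinstall); if neither is installed, every flag is new.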
8075                                         if myoldbest and myinslotlist:
8076                                                 previous_cpv = myoldbest[0]
8077                                         else:
8078                                                 previous_cpv = pkg.cpv
8079                                         if vardb.cpv_exists(previous_cpv):
8080                                                 old_iuse, old_use = vardb.aux_get(
8081                                                                 previous_cpv, ["IUSE", "USE"])
8082                                                 old_iuse = list(set(
8083                                                         filter_iuse_defaults(old_iuse.split())))
8084                                                 old_iuse.sort()
8085                                                 old_use = old_use.split()
8086                                                 is_new = False
8087                                         else:
8088                                                 old_iuse = []
8089                                                 old_use = []
8090                                                 is_new = True
8091
8092                                         old_use = [flag for flag in old_use if flag in old_iuse]
8093
8094                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8095                                         use_expand.sort()
8096                                         use_expand.reverse()
8097                                         use_expand_hidden = \
8098                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8099
8100                                         def map_to_use_expand(myvals, forcedFlags=False,
8101                                                 removeHidden=True):
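                                                     # Sort flags into their USE_EXPAND buckets; for example
                                                     # a flag such as "linguas_de" would be reported under
                                                     # LINGUAS as "de", while anything left over stays under
                                                     # plain USE.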
8102                                                 ret = {}
8103                                                 forced = {}
8104                                                 for exp in use_expand:
8105                                                         ret[exp] = []
8106                                                         forced[exp] = set()
8107                                                         for val in myvals[:]:
8108                                                                 if val.startswith(exp.lower()+"_"):
8109                                                                         if val in forced_flags:
8110                                                                                 forced[exp].add(val[len(exp)+1:])
8111                                                                         ret[exp].append(val[len(exp)+1:])
8112                                                                         myvals.remove(val)
8113                                                 ret["USE"] = myvals
8114                                                 forced["USE"] = [val for val in myvals \
8115                                                         if val in forced_flags]
8116                                                 if removeHidden:
8117                                                         for exp in use_expand_hidden:
8118                                                                 ret.pop(exp, None)
8119                                                 if forcedFlags:
8120                                                         return ret, forced
8121                                                 return ret
8122
8123                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8124                                         # are the only thing that triggered reinstallation.
8125                                         reinst_flags_map = {}
8126                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8127                                         reinst_expand_map = None
8128                                         if reinstall_for_flags:
8129                                                 reinst_flags_map = map_to_use_expand(
8130                                                         list(reinstall_for_flags), removeHidden=False)
8131                                                 for k in list(reinst_flags_map):
8132                                                         if not reinst_flags_map[k]:
8133                                                                 del reinst_flags_map[k]
8134                                                 if not reinst_flags_map.get("USE"):
8135                                                         reinst_expand_map = reinst_flags_map.copy()
8136                                                         reinst_expand_map.pop("USE", None)
8137                                         if reinst_expand_map and \
8138                                                 not set(reinst_expand_map).difference(
8139                                                 use_expand_hidden):
8140                                                 use_expand_hidden = \
8141                                                         set(use_expand_hidden).difference(
8142                                                         reinst_expand_map)
8143
8144                                         cur_iuse_map, iuse_forced = \
8145                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8146                                         cur_use_map = map_to_use_expand(cur_use)
8147                                         old_iuse_map = map_to_use_expand(old_iuse)
8148                                         old_use_map = map_to_use_expand(old_use)
8149
8150                                         use_expand.sort()
8151                                         use_expand.insert(0, "USE")
8152
8153                                         for key in use_expand:
8154                                                 if key in use_expand_hidden:
8155                                                         continue
8156                                                 verboseadd += create_use_string(key.upper(),
8157                                                         cur_iuse_map[key], iuse_forced[key],
8158                                                         cur_use_map[key], old_iuse_map[key],
8159                                                         old_use_map[key], is_new,
8160                                                         reinst_flags_map.get(key))
8161
8162                                 if verbosity == 3:
8163                                         # size verbose
8164                                         mysize=0
8165                                         if pkg_type == "ebuild" and pkg_merge:
8166                                                 try:
8167                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8168                                                                 useflags=pkg_use, debug=self.edebug)
8169                                                 except portage.exception.InvalidDependString, e:
8170                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8171                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8172                                                         del e
8173                                                         return 1
8174                                                 if myfilesdict is None:
8175                                                         myfilesdict="[empty/missing/bad digest]"
8176                                                 else:
8177                                                         for myfetchfile in myfilesdict:
8178                                                                 if myfetchfile not in myfetchlist:
8179                                                                         mysize+=myfilesdict[myfetchfile]
8180                                                                         myfetchlist.append(myfetchfile)
8181                                                         if ordered:
8182                                                                 counters.totalsize += mysize
8183                                                 verboseadd += format_size(mysize)
8184
8185                                         # overlay verbose
8186                                         # look up the repository of any previously installed version in the same slot
8187                                         has_previous = False
8188                                         repo_name_prev = None
8189                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8190                                                 metadata["SLOT"])
8191                                         slot_matches = vardb.match(slot_atom)
8192                                         if slot_matches:
8193                                                 has_previous = True
8194                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8195                                                         ["repository"])[0]
8196
8197                                         # now use the data to generate output
8198                                         if pkg.installed or not has_previous:
8199                                                 repoadd = repo_display.repoStr(repo_path_real)
8200                                         else:
8201                                                 repo_path_prev = None
8202                                                 if repo_name_prev:
8203                                                         repo_path_prev = portdb.getRepositoryPath(
8204                                                                 repo_name_prev)
8205                                                 if repo_path_prev == repo_path_real:
8206                                                         repoadd = repo_display.repoStr(repo_path_real)
8207                                                 else:
8208                                                         repoadd = "%s=>%s" % (
8209                                                                 repo_display.repoStr(repo_path_prev),
8210                                                                 repo_display.repoStr(repo_path_real))
8211                                         if repoadd:
8212                                                 repoadd_set.add(repoadd)
8213
8214                                 xs = [portage.cpv_getkey(pkg_key)] + \
8215                                         list(portage.catpkgsplit(pkg_key)[2:])
8216                                 if xs[2] == "r0":
8217                                         xs[2] = ""
8218                                 else:
8219                                         xs[2] = "-" + xs[2]
8220
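                                     # Determine the display width, falling back to 130 columns when
                                     # COLUMNWIDTH is unset or cannot be parsed as an integer.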
8221                                 mywidth = 130
8222                                 if "COLUMNWIDTH" in self.settings:
8223                                         try:
8224                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8225                                         except ValueError, e:
8226                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8227                                                 portage.writemsg(
8228                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8229                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8230                                                 del e
8231                                 oldlp = mywidth - 30
8232                                 newlp = oldlp - 30
8233
8234                                 # Convert myoldbest from a list to a string.
8235                                 if not myoldbest:
8236                                         myoldbest = ""
8237                                 else:
8238                                         for pos, key in enumerate(myoldbest):
8239                                                 key = portage.catpkgsplit(key)[2] + \
8240                                                         "-" + portage.catpkgsplit(key)[3]
8241                                                 if key[-3:] == "-r0":
8242                                                         key = key[:-3]
8243                                                 myoldbest[pos] = key
8244                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8245
8246                                 pkg_cp = xs[0]
8247                                 root_config = self.roots[myroot]
8248                                 system_set = root_config.sets["system"]
8249                                 world_set  = root_config.sets["world"]
8250
8251                                 pkg_system = False
8252                                 pkg_world = False
8253                                 try:
8254                                         pkg_system = system_set.findAtomForPackage(pkg)
8255                                         pkg_world  = world_set.findAtomForPackage(pkg)
8256                                         if not (oneshot or pkg_world) and \
8257                                                 myroot == self.target_root and \
8258                                                 favorites_set.findAtomForPackage(pkg):
8259                                                 # Maybe it will be added to world now.
8260                                                 if create_world_atom(pkg, favorites_set, root_config):
8261                                                         pkg_world = True
8262                                 except portage.exception.InvalidDependString:
8263                                         # This is reported elsewhere if relevant.
8264                                         pass
8265
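                                     # Colorize the package string according to its merge status and
                                     # system/world membership.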
8266                                 def pkgprint(pkg_str):
8267                                         if pkg_merge:
8268                                                 if pkg_system:
8269                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8270                                                 elif pkg_world:
8271                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8272                                                 else:
8273                                                         return colorize("PKG_MERGE", pkg_str)
8274                                         elif pkg_status == "uninstall":
8275                                                 return colorize("PKG_UNINSTALL", pkg_str)
8276                                         else:
8277                                                 if pkg_system:
8278                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8279                                                 elif pkg_world:
8280                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8281                                                 else:
8282                                                         return colorize("PKG_NOMERGE", pkg_str)
8283
8284                                 try:
8285                                         properties = flatten(use_reduce(paren_reduce(
8286                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8287                                 except portage.exception.InvalidDependString, e:
8288                                         if not pkg.installed:
8289                                                 show_invalid_depstring_notice(pkg,
8290                                                         pkg.metadata["PROPERTIES"], str(e))
8291                                                 del e
8292                                                 return 1
8293                                         properties = []
8294                                 interactive = "interactive" in properties
8295                                 if interactive and pkg.operation == "merge":
8296                                         addl = colorize("WARN", "I") + addl[1:]
8297                                         if ordered:
8298                                                 counters.interactive += 1
8299
8300                                 if x[1]!="/":
8301                                         if myoldbest:
8302                                                 myoldbest +=" "
8303                                         if "--columns" in self.myopts:
8304                                                 if "--quiet" in self.myopts:
8305                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8306                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8307                                                         myprint=myprint+myoldbest
8308                                                         myprint=myprint+darkgreen("to "+x[1])
8309                                                         verboseadd = None
8310                                                 else:
8311                                                         if not pkg_merge:
8312                                                                 myprint = "[%s] %s%s" % \
8313                                                                         (pkgprint(pkg_status.ljust(13)),
8314                                                                         indent, pkgprint(pkg.cp))
8315                                                         else:
8316                                                                 myprint = "[%s %s] %s%s" % \
8317                                                                         (pkgprint(pkg.type_name), addl,
8318                                                                         indent, pkgprint(pkg.cp))
8319                                                         if (newlp-nc_len(myprint)) > 0:
8320                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8321                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8322                                                         if (oldlp-nc_len(myprint)) > 0:
8323                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8324                                                         myprint=myprint+myoldbest
8325                                                         myprint += darkgreen("to " + pkg.root)
8326                                         else:
8327                                                 if not pkg_merge:
8328                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8329                                                 else:
8330                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8331                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8332                                                         myoldbest + darkgreen("to " + myroot)
8333                                 else:
8334                                         if "--columns" in self.myopts:
8335                                                 if "--quiet" in self.myopts:
8336                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8337                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8338                                                         myprint=myprint+myoldbest
8339                                                         verboseadd = None
8340                                                 else:
8341                                                         if not pkg_merge:
8342                                                                 myprint = "[%s] %s%s" % \
8343                                                                         (pkgprint(pkg_status.ljust(13)),
8344                                                                         indent, pkgprint(pkg.cp))
8345                                                         else:
8346                                                                 myprint = "[%s %s] %s%s" % \
8347                                                                         (pkgprint(pkg.type_name), addl,
8348                                                                         indent, pkgprint(pkg.cp))
8349                                                         if (newlp-nc_len(myprint)) > 0:
8350                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8351                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8352                                                         if (oldlp-nc_len(myprint)) > 0:
8353                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8354                                                         myprint += myoldbest
8355                                         else:
8356                                                 if not pkg_merge:
8357                                                         myprint = "[%s] %s%s %s" % \
8358                                                                 (pkgprint(pkg_status.ljust(13)),
8359                                                                 indent, pkgprint(pkg.cpv),
8360                                                                 myoldbest)
8361                                                 else:
8362                                                         myprint = "[%s %s] %s%s %s" % \
8363                                                                 (pkgprint(pkg_type), addl, indent,
8364                                                                 pkgprint(pkg.cpv), myoldbest)
8365
8366                                 if columns and pkg.operation == "uninstall":
8367                                         continue
8368                                 p.append((myprint, verboseadd, repoadd))
8369
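                                     # If a new version of portage itself is in the merge list, warn that
                                     # emerge will stop at that point, reload itself, and then resume the
                                     # remaining merges.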
8370                                 if "--tree" not in self.myopts and \
8371                                         "--quiet" not in self.myopts and \
8372                                         not self._opts_no_restart.intersection(self.myopts) and \
8373                                         pkg.root == self._running_root.root and \
8374                                         portage.match_from_list(
8375                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8376                                         not vardb.cpv_exists(pkg.cpv):
8378                                                 if mylist_index < len(mylist) - 1:
8379                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8380                                                         p.append(colorize("WARN", "    then resume the merge."))
8381
8382                 out = sys.stdout
8383                 show_repos = repoadd_set and repoadd_set != set(["0"])
8384
8385                 for x in p:
8386                         if isinstance(x, basestring):
8387                                 out.write("%s\n" % (x,))
8388                                 continue
8389
8390                         myprint, verboseadd, repoadd = x
8391
8392                         if verboseadd:
8393                                 myprint += " " + verboseadd
8394
8395                         if show_repos and repoadd:
8396                                 myprint += " " + teal("[%s]" % repoadd)
8397
8398                         out.write("%s\n" % (myprint,))
8399
8400                 for x in blockers:
8401                         print x
8402
8403                 if verbosity == 3:
8404                         print
8405                         print counters
8406                         if show_repos:
8407                                 sys.stdout.write(str(repo_display))
8408
8409                 if "--changelog" in self.myopts:
8410                         print
8411                         for revision,text in changelogs:
8412                                 print bold('*'+revision)
8413                                 sys.stdout.write(text)
8414
8415                 sys.stdout.flush()
8416                 return os.EX_OK
8417
8418         def display_problems(self):
8419                 """
8420                 Display problems with the dependency graph such as slot collisions.
8421                 This is called internally by display() to show the problems _after_
8422                 the merge list, where they are most likely to be seen, but if display()
8423                 is not going to be called then this method should be called explicitly
8424                 to ensure that the user is notified of problems with the graph.
8425
8426                 All output goes to stderr, except for unsatisfied dependencies which
8427                 go to stdout for parsing by programs such as autounmask.
8428                 """
8429
8430                 # Note that show_masked_packages() sends its output to
8431                 # stdout, and some programs such as autounmask parse the
8432                 # output in cases when emerge bails out. However, when
8433                 # show_masked_packages() is called for installed packages
8434                 # here, the message is a warning that is more appropriate
8435                 # to send to stderr, so temporarily redirect stdout to
8436                 # stderr. TODO: Fix output code so there's a cleaner way
8437                 # to redirect everything to stderr.
8438                 sys.stdout.flush()
8439                 sys.stderr.flush()
8440                 stdout = sys.stdout
8441                 try:
8442                         sys.stdout = sys.stderr
8443                         self._display_problems()
8444                 finally:
8445                         sys.stdout = stdout
8446                         sys.stdout.flush()
8447                         sys.stderr.flush()
8448
8449                 # This goes to stdout for parsing by programs like autounmask.
8450                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8451                         self._show_unsatisfied_dep(*pargs, **kwargs)
8452
8453         def _display_problems(self):
8454                 if self._circular_deps_for_display is not None:
8455                         self._show_circular_deps(
8456                                 self._circular_deps_for_display)
8457
8458                 # The user is only notified of a slot conflict if
8459                 # there are no unresolvable blocker conflicts.
8460                 if self._unsatisfied_blockers_for_display is not None:
8461                         self._show_unsatisfied_blockers(
8462                                 self._unsatisfied_blockers_for_display)
8463                 else:
8464                         self._show_slot_collision_notice()
8465
8466                 # TODO: Add generic support for "set problem" handlers so that
8467                 # the below warnings aren't special cases for world only.
8468
8469                 if self._missing_args:
8470                         world_problems = False
8471                         if "world" in self._sets:
8472                                 # Filter out indirect members of world (from nested sets)
8473                                 # since only direct members of world are desired here.
8474                                 world_set = self.roots[self.target_root].sets["world"]
8475                                 for arg, atom in self._missing_args:
8476                                         if arg.name == "world" and atom in world_set:
8477                                                 world_problems = True
8478                                                 break
8479
8480                         if world_problems:
8481                                 sys.stderr.write("\n!!! Problems have been " + \
8482                                         "detected with your world file\n")
8483                                 sys.stderr.write("!!! Please run " + \
8484                                         green("emaint --check world")+"\n\n")
8485
8486                 if self._missing_args:
8487                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8488                                 " Ebuilds for the following packages are either all\n")
8489                         sys.stderr.write(colorize("BAD", "!!!") + \
8490                                 " masked or don't exist:\n")
8491                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8492                                 self._missing_args) + "\n")
8493
8494                 if self._pprovided_args:
8495                         arg_refs = {}
8496                         for arg, atom in self._pprovided_args:
8497                                 if isinstance(arg, SetArg):
8498                                         parent = arg.name
8499                                         arg_atom = (atom, atom)
8500                                 else:
8501                                         parent = "args"
8502                                         arg_atom = (arg.arg, atom)
8503                                 refs = arg_refs.setdefault(arg_atom, [])
8504                                 if parent not in refs:
8505                                         refs.append(parent)
8506                         msg = []
8507                         msg.append(bad("\nWARNING: "))
8508                         if len(self._pprovided_args) > 1:
8509                                 msg.append("Requested packages will not be " + \
8510                                         "merged because they are listed in\n")
8511                         else:
8512                                 msg.append("A requested package will not be " + \
8513                                         "merged because it is listed in\n")
8514                         msg.append("package.provided:\n\n")
8515                         problems_sets = set()
8516                         for (arg, atom), refs in arg_refs.iteritems():
8517                                 ref_string = ""
8518                                 if refs:
8519                                         problems_sets.update(refs)
8520                                         refs.sort()
8521                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8522                                         ref_string = " pulled in by " + ref_string
8523                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8524                         msg.append("\n")
8525                         if "world" in problems_sets:
8526                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8527                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8528                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8529                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8530                                 msg.append("The best course of action depends on the reason that an offending\n")
8531                                 msg.append("package.provided entry exists.\n\n")
8532                         sys.stderr.write("".join(msg))
8533
8534                 masked_packages = []
8535                 for pkg in self._masked_installed:
8536                         root_config = pkg.root_config
8537                         pkgsettings = self.pkgsettings[pkg.root]
8538                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8539                         masked_packages.append((root_config, pkgsettings,
8540                                 pkg.cpv, pkg.metadata, mreasons))
8541                 if masked_packages:
8542                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8543                                 " The following installed packages are masked:\n")
8544                         show_masked_packages(masked_packages)
8545                         show_mask_docs()
8546                         print
8547
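             # Collect the ChangeLog entries between the installed version (current)
             # and the version about to be merged (next). Returns an empty list if
             # the ChangeLog is missing or the current version cannot be found in it.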
8548         def calc_changelog(self,ebuildpath,current,next):
8549                 if ebuildpath is None or not os.path.exists(ebuildpath):
8550                         return []
8551                 current = '-'.join(portage.catpkgsplit(current)[1:])
8552                 if current.endswith('-r0'):
8553                         current = current[:-3]
8554                 next = '-'.join(portage.catpkgsplit(next)[1:])
8555                 if next.endswith('-r0'):
8556                         next = next[:-3]
8557                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8558                 try:
8559                         changelog = open(changelogpath).read()
8560                 except SystemExit, e:
8561                         raise # re-raise so SystemExit is not swallowed by the catch-all below
8562                 except:
8563                         return []
8564                 divisions = self.find_changelog_tags(changelog)
8565                 #print 'XX from',current,'to',next
8566                 #for div,text in divisions: print 'XX',div
8567                 # skip entries for all revisions above the one we are about to emerge
8568                 for i in range(len(divisions)):
8569                         if divisions[i][0]==next:
8570                                 divisions = divisions[i:]
8571                                 break
8572                 # find out how many entries we are going to display
8573                 for i in range(len(divisions)):
8574                         if divisions[i][0]==current:
8575                                 divisions = divisions[:i]
8576                                 break
8577                 else:
8578                         # couldn't find the current revision in the list; display nothing
8579                         return []
8580                 return divisions
8581
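             # Split a ChangeLog into (release, text) tuples, one per '*' header,
             # stripping any '.ebuild' suffix and '-r0' revision from the release.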
8582         def find_changelog_tags(self,changelog):
8583                 divs = []
8584                 release = None
8585                 while 1:
8586                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8587                         if match is None:
8588                                 if release is not None:
8589                                         divs.append((release,changelog))
8590                                 return divs
8591                         if release is not None:
8592                                 divs.append((release,changelog[:match.start()]))
8593                         changelog = changelog[match.end():]
8594                         release = match.group(1)
8595                         if release.endswith('.ebuild'):
8596                                 release = release[:-7]
8597                         if release.endswith('-r0'):
8598                                 release = release[:-3]
8599
8600         def saveNomergeFavorites(self):
8601                 """Find atoms in favorites that are not in the mergelist and add them
8602                 to the world file if necessary."""
8603                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8604                         "--oneshot", "--onlydeps", "--pretend"):
8605                         if x in self.myopts:
8606                                 return
8607                 root_config = self.roots[self.target_root]
8608                 world_set = root_config.sets["world"]
8609
8610                 world_locked = False
8611                 if hasattr(world_set, "lock"):
8612                         world_set.lock()
8613                         world_locked = True
8614
8615                 if hasattr(world_set, "load"):
8616                         world_set.load() # maybe it's changed on disk
8617
8618                 args_set = self._sets["args"]
8619                 portdb = self.trees[self.target_root]["porttree"].dbapi
8620                 added_favorites = set()
8621                 for x in self._set_nodes:
8622                         pkg_type, root, pkg_key, pkg_status = x
8623                         if pkg_status != "nomerge":
8624                                 continue
8625
8626                         try:
8627                                 myfavkey = create_world_atom(x, args_set, root_config)
8628                                 if myfavkey:
8629                                         if myfavkey in added_favorites:
8630                                                 continue
8631                                         added_favorites.add(myfavkey)
8632                         except portage.exception.InvalidDependString, e:
8633                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8634                                         (pkg_key, str(e)), noiselevel=-1)
8635                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8636                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8637                                 del e
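                     # Also record package sets from self._sets (other than "args" and
                     # "world"), prefixed with SETPREFIX, unless they are already in the
                     # world file or are not world_candidate sets.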
8638                 all_added = []
8639                 for k in self._sets:
8640                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8641                                 continue
8642                         s = SETPREFIX + k
8643                         if s in world_set:
8644                                 continue
8645                         all_added.append(SETPREFIX + k)
8646                 all_added.extend(added_favorites)
8647                 all_added.sort()
8648                 for a in all_added:
8649                         print ">>> Recording %s in \"world\" favorites file..." % \
8650                                 colorize("INFORM", str(a))
8651                 if all_added:
8652                         world_set.update(all_added)
8653
8654                 if world_locked:
8655                         world_set.unlock()
8656
8657         def loadResumeCommand(self, resume_data, skip_masked=False):
8658                 """
8659                 Add a resume command to the graph and validate it in the process.  This
8660                 will raise a PackageNotFound exception if a package is not available.
8661                 """
8662
8663                 if not isinstance(resume_data, dict):
8664                         return False
8665
8666                 mergelist = resume_data.get("mergelist")
8667                 if not isinstance(mergelist, list):
8668                         mergelist = []
8669
8670                 fakedb = self.mydbapi
8671                 trees = self.trees
8672                 serialized_tasks = []
8673                 masked_tasks = []
8674                 for x in mergelist:
8675                         if not (isinstance(x, list) and len(x) == 4):
8676                                 continue
8677                         pkg_type, myroot, pkg_key, action = x
8678                         if pkg_type not in self.pkg_tree_map:
8679                                 continue
8680                         if action != "merge":
8681                                 continue
8682                         tree_type = self.pkg_tree_map[pkg_type]
8683                         mydb = trees[myroot][tree_type].dbapi
8684                         db_keys = list(self._trees_orig[myroot][
8685                                 tree_type].dbapi._aux_cache_keys)
8686                         try:
8687                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8688                         except KeyError:
8689                                 # It does not exist or it is corrupt.
8690                                 if action == "uninstall":
8691                                         continue
8692                                 raise portage.exception.PackageNotFound(pkg_key)
8693                         installed = action == "uninstall"
8694                         built = pkg_type != "ebuild"
8695                         root_config = self.roots[myroot]
8696                         pkg = Package(built=built, cpv=pkg_key,
8697                                 installed=installed, metadata=metadata,
8698                                 operation=action, root_config=root_config,
8699                                 type_name=pkg_type)
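                             # For unbuilt ebuilds, recompute USE from the current configuration
                             # and record the configured CHOST in the package metadata.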
8700                         if pkg_type == "ebuild":
8701                                 pkgsettings = self.pkgsettings[myroot]
8702                                 pkgsettings.setcpv(pkg)
8703                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8704                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8705                         self._pkg_cache[pkg] = pkg
8706
8707                         root_config = self.roots[pkg.root]
8708                         if "merge" == pkg.operation and \
8709                                 not visible(root_config.settings, pkg):
8710                                 if skip_masked:
8711                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8712                                 else:
8713                                         self._unsatisfied_deps_for_display.append(
8714                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8715
8716                         fakedb[myroot].cpv_inject(pkg)
8717                         serialized_tasks.append(pkg)
8718                         self.spinner.update()
8719
8720                 if self._unsatisfied_deps_for_display:
8721                         return False
8722
8723                 if not serialized_tasks or "--nodeps" in self.myopts:
8724                         self._serialized_tasks_cache = serialized_tasks
8725                         self._scheduler_graph = self.digraph
8726                 else:
8727                         self._select_package = self._select_pkg_from_graph
8728                         self.myparams.add("selective")
8729                         # Always traverse deep dependencies in order to account for
8730                         # potentially unsatisfied dependencies of installed packages.
8731                         # This is necessary for correct --keep-going or --resume operation
8732                         # in case a package from a group of circularly dependent packages
8733                         # fails. In this case, a package which has recently been installed
8734                         # may have an unsatisfied circular dependency (pulled in by
8735                         # PDEPEND, for example). So, even though a package is already
8736                         # installed, it may not have all of its dependencies satisfied, so
8737                         # it may not be usable. If such a package is in the subgraph of
8738                         # deep dependencies of a scheduled build, that build needs to
8739                         # be cancelled. In order for this type of situation to be
8740                         # recognized, deep traversal of dependencies is required.
8741                         self.myparams.add("deep")
8742
8743                         favorites = resume_data.get("favorites")
8744                         args_set = self._sets["args"]
8745                         if isinstance(favorites, list):
8746                                 args = self._load_favorites(favorites)
8747                         else:
8748                                 args = []
8749
8750                         for task in serialized_tasks:
8751                                 if isinstance(task, Package) and \
8752                                         task.operation == "merge":
8753                                         if not self._add_pkg(task, None):
8754                                                 return False
8755
8756                         # Packages for argument atoms need to be explicitly
8757                         # added via _add_pkg() so that they are included in the
8758                         # digraph (needed at least for --tree display).
8759                         for arg in args:
8760                                 for atom in arg.set:
8761                                         pkg, existing_node = self._select_package(
8762                                                 arg.root_config.root, atom)
8763                                         if existing_node is None and \
8764                                                 pkg is not None:
8765                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8766                                                         root=pkg.root, parent=arg)):
8767                                                         return False
8768
8769                         # Allow unsatisfied deps here to avoid showing a masking
8770                         # message for an unsatisfied dep that isn't necessarily
8771                         # masked.
8772                         if not self._create_graph(allow_unsatisfied=True):
8773                                 return False
8774
8775                         unsatisfied_deps = []
8776                         for dep in self._unsatisfied_deps:
8777                                 if not isinstance(dep.parent, Package):
8778                                         continue
8779                                 if dep.parent.operation == "merge":
8780                                         unsatisfied_deps.append(dep)
8781                                         continue
8782
8783                                 # For unsatisfied deps of installed packages, only account for
8784                                 # them if they are in the subgraph of dependencies of a package
8785                                 # which is scheduled to be installed.
8786                                 unsatisfied_install = False
8787                                 traversed = set()
8788                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8789                                 while dep_stack:
8790                                         node = dep_stack.pop()
8791                                         if not isinstance(node, Package):
8792                                                 continue
8793                                         if node.operation == "merge":
8794                                                 unsatisfied_install = True
8795                                                 break
8796                                         if node in traversed:
8797                                                 continue
8798                                         traversed.add(node)
8799                                         dep_stack.extend(self.digraph.parent_nodes(node))
8800
8801                                 if unsatisfied_install:
8802                                         unsatisfied_deps.append(dep)
8803
8804                         if masked_tasks or unsatisfied_deps:
8805                                 # This probably means that a required package
8806                                 # was dropped via --skipfirst. It makes the
8807                                 # resume list invalid, so convert it to a
8808                                 # UnsatisfiedResumeDep exception.
8809                                 raise self.UnsatisfiedResumeDep(self,
8810                                         masked_tasks + unsatisfied_deps)
8811                         self._serialized_tasks_cache = None
8812                         try:
8813                                 self.altlist()
8814                         except self._unknown_internal_error:
8815                                 return False
8816
8817                 return True
8818
8819         def _load_favorites(self, favorites):
8820                 """
8821                 Use a list of favorites to resume state from a
8822                 previous select_files() call. This creates similar
8823                 DependencyArg instances to those that would have
8824                 been created by the original select_files() call.
8825                 This allows Package instances to be matched with
8826                 DependencyArg instances during graph creation.
8827                 """
8828                 root_config = self.roots[self.target_root]
8829                 getSetAtoms = root_config.setconfig.getSetAtoms
8830                 sets = root_config.sets
8831                 args = []
8832                 for x in favorites:
8833                         if not isinstance(x, basestring):
8834                                 continue
8835                         if x in ("system", "world"):
8836                                 x = SETPREFIX + x
8837                         if x.startswith(SETPREFIX):
8838                                 s = x[len(SETPREFIX):]
8839                                 if s not in sets:
8840                                         continue
8841                                 if s in self._sets:
8842                                         continue
8843                                 # Recursively expand sets so that containment tests in
8844                                 # self._get_parent_sets() properly match atoms in nested
8845                                 # sets (like if world contains system).
8846                                 expanded_set = InternalPackageSet(
8847                                         initial_atoms=getSetAtoms(s))
8848                                 self._sets[s] = expanded_set
8849                                 args.append(SetArg(arg=x, set=expanded_set,
8850                                         root_config=root_config))
8851                         else:
8852                                 if not portage.isvalidatom(x):
8853                                         continue
8854                                 args.append(AtomArg(arg=x, atom=x,
8855                                         root_config=root_config))
8856
8857                 self._set_args(args)
8858                 return args
8859
8860         class UnsatisfiedResumeDep(portage.exception.PortageException):
8861                 """
8862                 A dependency of a resume list is not installed. This
8863                 can occur when a required package is dropped from the
8864                 merge list via --skipfirst.
8865                 """
8866                 def __init__(self, depgraph, value):
8867                         portage.exception.PortageException.__init__(self, value)
8868                         self.depgraph = depgraph
8869
8870         class _internal_exception(portage.exception.PortageException):
8871                 def __init__(self, value=""):
8872                         portage.exception.PortageException.__init__(self, value)
8873
8874         class _unknown_internal_error(_internal_exception):
8875                 """
8876                 Used by the depgraph internally to terminate graph creation.
8877                 The specific reason for the failure should have been dumped
8878                 to stderr; unfortunately, the exact reason for the failure
8879                 may not be known.
8880                 """
8881
8882         class _serialize_tasks_retry(_internal_exception):
8883                 """
8884                 This is raised by the _serialize_tasks() method when it needs to
8885                 be called again for some reason. The only case that it's currently
8886                 used for is when neglected dependencies need to be added to the
8887                 graph in order to avoid making a potentially unsafe decision.
8888                 """
8889
8890         class _dep_check_composite_db(portage.dbapi):
8891                 """
8892                 A dbapi-like interface that is optimized for use in dep_check() calls.
8893                 This is built on top of the existing depgraph package selection logic.
8894                 Some packages that have been added to the graph may be masked from this
8895                 view in order to influence the atom preference selection that occurs
8896                 via dep_check().
8897                 """
8898                 def __init__(self, depgraph, root):
8899                         portage.dbapi.__init__(self)
8900                         self._depgraph = depgraph
8901                         self._root = root
8902                         self._match_cache = {}
8903                         self._cpv_pkg_map = {}
8904
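                     # Discard cached match() results and the cpv -> Package map.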
8905                 def _clear_cache(self):
8906                         self._match_cache.clear()
8907                         self._cpv_pkg_map.clear()
8908
8909                 def match(self, atom):
8910                         ret = self._match_cache.get(atom)
8911                         if ret is not None:
8912                                 return ret[:]
8913                         orig_atom = atom
8914                         if "/" not in atom:
8915                                 atom = self._dep_expand(atom)
8916                         pkg, existing = self._depgraph._select_package(self._root, atom)
8917                         if not pkg:
8918                                 ret = []
8919                         else:
8920                                 # Return the highest available from select_package() as well as
8921                                 # any matching slots in the graph db.
8922                                 slots = set()
8923                                 slots.add(pkg.metadata["SLOT"])
8924                                 atom_cp = portage.dep_getkey(atom)
8925                                 if pkg.cp.startswith("virtual/"):
8926                                         # For new-style virtual lookahead that occurs inside
8927                                         # dep_check(), examine all slots. This is needed
8928                                         # so that newer slots will not unnecessarily be pulled in
8929                                         # when a satisfying lower slot is already installed. For
8930                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8931                                         # there's no need to pull in a newer slot to satisfy a
8932                                         # virtual/jdk dependency.
8933                                         for db, pkg_type, built, installed, db_keys in \
8934                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8935                                                 for cpv in db.match(atom):
8936                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8937                                                                 continue
8938                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8939                                 ret = []
8940                                 if self._visible(pkg):
8941                                         self._cpv_pkg_map[pkg.cpv] = pkg
8942                                         ret.append(pkg.cpv)
8943                                 slots.remove(pkg.metadata["SLOT"])
8944                                 while slots:
8945                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8946                                         pkg, existing = self._depgraph._select_package(
8947                                                 self._root, slot_atom)
8948                                         if not pkg:
8949                                                 continue
8950                                         if not self._visible(pkg):
8951                                                 continue
8952                                         self._cpv_pkg_map[pkg.cpv] = pkg
8953                                         ret.append(pkg.cpv)
8954                                 if ret:
8955                                         self._cpv_sort_ascending(ret)
8956                         self._match_cache[orig_atom] = ret
8957                         return ret[:]
8958
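                     # Hide installed packages that match a command-line argument when
                     # not in selective mode, installed packages that are no longer
                     # visible, packages that are not the highest visible version in
                     # their slot, and packages that would conflict with a previously
                     # selected package in the same slot.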
8959                 def _visible(self, pkg):
8960                         if pkg.installed and "selective" not in self._depgraph.myparams:
8961                                 try:
8962                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8963                                 except (StopIteration, portage.exception.InvalidDependString):
8964                                         arg = None
8965                                 if arg:
8966                                         return False
8967                         if pkg.installed:
8968                                 try:
8969                                         if not visible(
8970                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8971                                                 return False
8972                                 except portage.exception.InvalidDependString:
8973                                         pass
8974                         in_graph = self._depgraph._slot_pkg_map[
8975                                 self._root].get(pkg.slot_atom)
8976                         if in_graph is None:
8977                                 # Mask choices for packages which are not the highest visible
8978                                 # version within their slot (since they usually trigger slot
8979                                 # conflicts).
8980                                 highest_visible, in_graph = self._depgraph._select_package(
8981                                         self._root, pkg.slot_atom)
8982                                 if pkg != highest_visible:
8983                                         return False
8984                         elif in_graph != pkg:
8985                                 # Mask choices for packages that would trigger a slot
8986                                 # conflict with a previously selected package.
8987                                 return False
8988                         return True
8989
8990                 def _dep_expand(self, atom):
8991                         """
8992                         This is only needed for old installed packages that may
8993                         contain atoms that are not fully qualified with a specific
8994                         category. Emulate the cpv_expand() function that's used by
8995                         dbapi.match() in cases like this. If there are multiple
8996                         matches, it's often due to a new-style virtual that has
8997                         been added, so try to filter those out to avoid raising
8998                         a ValueError.
8999                         """
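                             # Illustrative sketch (the package names here are hypothetical,
                             # not from a real tree): an unqualified atom like "dbus" would
                             # expand to "sys-apps/dbus" when exactly one qualified match
                             # exists, while an atom that matches nothing at all falls
                             # through to the "null" category handling below.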
9000                         root_config = self._depgraph.roots[self._root]
9001                         orig_atom = atom
9002                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9003                         if len(expanded_atoms) > 1:
9004                                 non_virtual_atoms = []
9005                                 for x in expanded_atoms:
9006                                         if not portage.dep_getkey(x).startswith("virtual/"):
9007                                                 non_virtual_atoms.append(x)
9008                                 if len(non_virtual_atoms) == 1:
9009                                         expanded_atoms = non_virtual_atoms
9010                         if len(expanded_atoms) > 1:
9011                                 # Raise an error that's compatible with portage.cpv_expand().
9012                                 raise portage.exception.AmbiguousPackageName(
9013                                         [portage.dep_getkey(x) for x in expanded_atoms])
9014                         if expanded_atoms:
9015                                 atom = expanded_atoms[0]
9016                         else:
9017                                 null_atom = insert_category_into_atom(atom, "null")
9018                                 null_cp = portage.dep_getkey(null_atom)
9019                                 cat, atom_pn = portage.catsplit(null_cp)
9020                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9021                                 if virts_p:
9022                                         # Allow the resolver to choose which virtual.
9023                                         atom = insert_category_into_atom(atom, "virtual")
9024                                 else:
9025                                         atom = insert_category_into_atom(atom, "null")
9026                         return atom
9027
9028                 def aux_get(self, cpv, wants):
9029                         metadata = self._cpv_pkg_map[cpv].metadata
9030                         return [metadata.get(x, "") for x in wants]
9031
9032 class RepoDisplay(object):
9033         def __init__(self, roots):
9034                 self._shown_repos = {}
9035                 self._unknown_repo = False
9036                 repo_paths = set()
9037                 for root_config in roots.itervalues():
9038                         portdir = root_config.settings.get("PORTDIR")
9039                         if portdir:
9040                                 repo_paths.add(portdir)
9041                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9042                         if overlays:
9043                                 repo_paths.update(overlays.split())
9044                 repo_paths = list(repo_paths)
9045                 self._repo_paths = repo_paths
9046                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9047                         for repo_path in repo_paths ]
9048
9049                 # pre-allocate index for PORTDIR so that it always has index 0.
9050                 for root_config in roots.itervalues():
9051                         portdb = root_config.trees["porttree"].dbapi
9052                         portdir = portdb.porttree_root
9053                         if portdir:
9054                                 self.repoStr(portdir)
9055
9056         def repoStr(self, repo_path_real):
9057                 real_index = -1
9058                 if repo_path_real in self._repo_paths_real:
9059                         real_index = self._repo_paths_real.index(repo_path_real)
9060                 if real_index == -1:
9061                         s = "?"
9062                         self._unknown_repo = True
9063                 else:
9064                         shown_repos = self._shown_repos
9065                         repo_paths = self._repo_paths
9066                         repo_path = repo_paths[real_index]
9067                         index = shown_repos.get(repo_path)
9068                         if index is None:
9069                                 index = len(shown_repos)
9070                                 shown_repos[repo_path] = index
9071                         s = str(index)
9072                 return s
9073
9074         def __str__(self):
9075                 output = []
9076                 shown_repos = self._shown_repos
9077                 unknown_repo = self._unknown_repo
9078                 if shown_repos or self._unknown_repo:
9079                         output.append("Portage tree and overlays:\n")
9080                 show_repo_paths = list(shown_repos)
9081                 for repo_path, repo_index in shown_repos.iteritems():
9082                         show_repo_paths[repo_index] = repo_path
9083                 if show_repo_paths:
9084                         for index, repo_path in enumerate(show_repo_paths):
9085                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9086                 if unknown_repo:
9087                         output.append(" "+teal("[?]") + \
9088                                 " indicates that the source repository could not be determined\n")
9089                 return "".join(output)
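             # Example of the rendered summary (paths are illustrative):
             #
             #   Portage tree and overlays:
             #    [0] /usr/portage
             #    [1] /usr/local/portage
             #    [?] indicates that the source repository could not be determined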
9090
9091 class PackageCounters(object):
9092
9093         def __init__(self):
9094                 self.upgrades   = 0
9095                 self.downgrades = 0
9096                 self.new        = 0
9097                 self.newslot    = 0
9098                 self.reinst     = 0
9099                 self.uninst     = 0
9100                 self.blocks     = 0
9101                 self.blocks_satisfied         = 0
9102                 self.totalsize  = 0
9103                 self.restrict_fetch           = 0
9104                 self.restrict_fetch_satisfied = 0
9105                 self.interactive              = 0
9106
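             # __str__() below renders a one-line summary along the lines of
             # (the numbers and size are illustrative):
             #   Total: 3 packages (1 upgrade, 2 new), Size of downloads: 1,024 kB
             # The size string comes from format_size(), defined elsewhere in this file.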
9107         def __str__(self):
9108                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9109                 myoutput = []
9110                 details = []
9111                 myoutput.append("Total: %s package" % total_installs)
9112                 if total_installs != 1:
9113                         myoutput.append("s")
9114                 if total_installs != 0:
9115                         myoutput.append(" (")
9116                 if self.upgrades > 0:
9117                         details.append("%s upgrade" % self.upgrades)
9118                         if self.upgrades > 1:
9119                                 details[-1] += "s"
9120                 if self.downgrades > 0:
9121                         details.append("%s downgrade" % self.downgrades)
9122                         if self.downgrades > 1:
9123                                 details[-1] += "s"
9124                 if self.new > 0:
9125                         details.append("%s new" % self.new)
9126                 if self.newslot > 0:
9127                         details.append("%s in new slot" % self.newslot)
9128                         if self.newslot > 1:
9129                                 details[-1] += "s"
9130                 if self.reinst > 0:
9131                         details.append("%s reinstall" % self.reinst)
9132                         if self.reinst > 1:
9133                                 details[-1] += "s"
9134                 if self.uninst > 0:
9135                         details.append("%s uninstall" % self.uninst)
9136                         if self.uninst > 1:
9137                                 details[-1] += "s"
9138                 if self.interactive > 0:
9139                         details.append("%s %s" % (self.interactive,
9140                                 colorize("WARN", "interactive")))
9141                 myoutput.append(", ".join(details))
9142                 if total_installs != 0:
9143                         myoutput.append(")")
9144                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9145                 if self.restrict_fetch:
9146                         myoutput.append("\nFetch Restriction: %s package" % \
9147                                 self.restrict_fetch)
9148                         if self.restrict_fetch > 1:
9149                                 myoutput.append("s")
9150                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9151                         myoutput.append(bad(" (%s unsatisfied)") % \
9152                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9153                 if self.blocks > 0:
9154                         myoutput.append("\nConflict: %s block" % \
9155                                 self.blocks)
9156                         if self.blocks > 1:
9157                                 myoutput.append("s")
9158                         if self.blocks_satisfied < self.blocks:
9159                                 myoutput.append(bad(" (%s unsatisfied)") % \
9160                                         (self.blocks - self.blocks_satisfied))
9161                 return "".join(myoutput)
9162
9163 class PollSelectAdapter(PollConstants):
9164
9165         """
9166         Use select to emulate a poll object, for
9167         systems that don't support poll().
9168         """
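             # Only the subset of the select.poll() interface that PollScheduler
             # needs is provided: register(fd[, eventmask]), unregister(fd), and
             # poll([timeout]), with every reported event mapped to POLLIN.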
9169
9170         def __init__(self):
9171                 self._registered = {}
9172                 self._select_args = [[], [], []]
9173
9174         def register(self, fd, *args):
9175                 """
9176                 Only POLLIN is currently supported!
9177                 """
9178                 if len(args) > 1:
9179                         raise TypeError(
9180                                 "register expected at most 2 arguments, got " + \
9181                                 repr(1 + len(args)))
9182
9183                 eventmask = PollConstants.POLLIN | \
9184                         PollConstants.POLLPRI | PollConstants.POLLOUT
9185                 if args:
9186                         eventmask = args[0]
9187
9188                 self._registered[fd] = eventmask
9189                 self._select_args = None
9190
9191         def unregister(self, fd):
9192                 self._select_args = None
9193                 del self._registered[fd]
9194
9195         def poll(self, *args):
9196                 if len(args) > 1:
9197                         raise TypeError(
9198                                 "poll expected at most 2 arguments, got " + \
9199                                 repr(1 + len(args)))
9200
9201                 timeout = None
9202                 if args:
9203                         timeout = args[0]
9204
9205                 select_args = self._select_args
9206                 if select_args is None:
9207                         select_args = [self._registered.keys(), [], []]
9208
9209                 if timeout is not None:
9210                         select_args = select_args[:]
9211                         # Translate poll() timeout args to select() timeout args:
9212                         #
9213                         #          | units        | value(s) for indefinite block
9214                         # ---------|--------------|------------------------------
9215                         #   poll   | milliseconds | omitted, negative, or None
9216                         # ---------|--------------|------------------------------
9217                         #   select | seconds      | omitted
9218                         # ---------|--------------|------------------------------
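                             # For example, a poll() timeout of 500 (milliseconds)
                             # becomes a select() timeout of 0.5 (seconds).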
9219
9220                         if timeout < 0:
9221                                 timeout = None
9222                         if timeout is not None:
9223                                 select_args.append(timeout / 1000.0)
9224
9225                 select_events = select.select(*select_args)
9226                 poll_events = []
9227                 for fd in select_events[0]:
9228                         poll_events.append((fd, PollConstants.POLLIN))
9229                 return poll_events
9230
9231 class SequentialTaskQueue(SlotObject):
9232
9233         __slots__ = ("max_jobs", "running_tasks") + \
9234                 ("_dirty", "_scheduling", "_task_queue")
9235
9236         def __init__(self, **kwargs):
9237                 SlotObject.__init__(self, **kwargs)
9238                 self._task_queue = deque()
9239                 self.running_tasks = set()
9240                 if self.max_jobs is None:
9241                         self.max_jobs = 1
9242                 self._dirty = True
9243
9244         def add(self, task):
9245                 self._task_queue.append(task)
9246                 self._dirty = True
9247
9248         def addFront(self, task):
9249                 self._task_queue.appendleft(task)
9250                 self._dirty = True
9251
9252         def schedule(self):
9253
9254                 if not self._dirty:
9255                         return False
9256
9257                 if not self:
9258                         return False
9259
9260                 if self._scheduling:
9261                         # Ignore any recursive schedule() calls triggered via
9262                         # self._task_exit().
9263                         return False
9264
9265                 self._scheduling = True
9266
9267                 task_queue = self._task_queue
9268                 running_tasks = self.running_tasks
9269                 max_jobs = self.max_jobs
9270                 state_changed = False
9271
9272                 while task_queue and \
9273                         (max_jobs is True or len(running_tasks) < max_jobs):
9274                         task = task_queue.popleft()
9275                         cancelled = getattr(task, "cancelled", None)
9276                         if not cancelled:
9277                                 running_tasks.add(task)
9278                                 task.addExitListener(self._task_exit)
9279                                 task.start()
9280                         state_changed = True
9281
9282                 self._dirty = False
9283                 self._scheduling = False
9284
9285                 return state_changed
9286
9287         def _task_exit(self, task):
9288                 """
9289                 Since we can always rely on exit listeners being called, the set of
9290                 running tasks is always pruned automatically and there is never any need
9291                 to actively prune it.
9292                 """
9293                 self.running_tasks.remove(task)
9294                 if self._task_queue:
9295                         self._dirty = True
9296
9297         def clear(self):
9298                 self._task_queue.clear()
9299                 running_tasks = self.running_tasks
9300                 while running_tasks:
9301                         task = running_tasks.pop()
9302                         task.removeExitListener(self._task_exit)
9303                         task.cancel()
9304                 self._dirty = False
9305
9306         def __nonzero__(self):
9307                 return bool(self._task_queue or self.running_tasks)
9308
9309         def __len__(self):
9310                 return len(self._task_queue) + len(self.running_tasks)
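             # Minimal usage sketch (the scheduler classes below drive this for
             # real; max_jobs=2 is illustrative):
             #
             #   queue = SequentialTaskQueue(max_jobs=2)
             #   queue.add(task)     # task must provide start(), cancel(),
             #   queue.schedule()    # addExitListener() and removeExitListener()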
9311
9312 _can_poll_device = None
9313
9314 def can_poll_device():
9315         """
9316         Test if it's possible to use poll() on a device such as a pty. This
9317         is known to fail on Darwin.
9318         @rtype: bool
9319         @returns: True if poll() on a device succeeds, False otherwise.
9320         """
9321
9322         global _can_poll_device
9323         if _can_poll_device is not None:
9324                 return _can_poll_device
9325
9326         if not hasattr(select, "poll"):
9327                 _can_poll_device = False
9328                 return _can_poll_device
9329
9330         try:
9331                 dev_null = open('/dev/null', 'rb')
9332         except IOError:
9333                 _can_poll_device = False
9334                 return _can_poll_device
9335
9336         p = select.poll()
9337         p.register(dev_null.fileno(), PollConstants.POLLIN)
9338
9339         invalid_request = False
9340         for f, event in p.poll():
9341                 if event & PollConstants.POLLNVAL:
9342                         invalid_request = True
9343                         break
9344         dev_null.close()
9345
9346         _can_poll_device = not invalid_request
9347         return _can_poll_device
9348
9349 def create_poll_instance():
9350         """
9351         Create an instance of select.poll, or an instance of
9352         PollSelectAdapter if there is no poll() implementation or
9353         if it is broken (see can_poll_device() above).
9354         """
9355         if can_poll_device():
9356                 return select.poll()
9357         return PollSelectAdapter()
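     # Typical usage, mirroring how PollScheduler below drives the poll object:
     #
     #   poll_obj = create_poll_instance()
     #   poll_obj.register(fd, PollConstants.POLLIN)
     #   events = poll_obj.poll(timeout)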
9358
9359 getloadavg = getattr(os, "getloadavg", None)
9360 if getloadavg is None:
9361         def getloadavg():
9362                 """
9363                 Uses /proc/loadavg to emulate os.getloadavg().
9364                 Raises OSError if the load average was unobtainable.
9365                 """
9366                 try:
9367                         loadavg_str = open('/proc/loadavg').readline()
9368                 except IOError:
9369                         # getloadavg() is only supposed to raise OSError, so convert
9370                         raise OSError('unknown')
9371                 loadavg_split = loadavg_str.split()
9372                 if len(loadavg_split) < 3:
9373                         raise OSError('unknown')
9374                 loadavg_floats = []
9375                 for i in xrange(3):
9376                         try:
9377                                 loadavg_floats.append(float(loadavg_split[i]))
9378                         except ValueError:
9379                                 raise OSError('unknown')
9380                 return tuple(loadavg_floats)
9381
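     # In either case getloadavg() returns a 3-tuple of floats, consumed like:
     #
     #   avg1, avg5, avg15 = getloadavg()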
9382 class PollScheduler(object):
9383
9384         class _sched_iface_class(SlotObject):
9385                 __slots__ = ("register", "schedule", "unregister")
9386
9387         def __init__(self):
9388                 self._max_jobs = 1
9389                 self._max_load = None
9390                 self._jobs = 0
9391                 self._poll_event_queue = []
9392                 self._poll_event_handlers = {}
9393                 self._poll_event_handler_ids = {}
9394                 # Increment id for each new handler.
9395                 self._event_handler_id = 0
9396                 self._poll_obj = create_poll_instance()
9397                 self._scheduling = False
9398
9399         def _schedule(self):
9400                 """
9401                 Calls _schedule_tasks() and automatically returns early from
9402                 any recursive calls to this method that the _schedule_tasks()
9403                 call might trigger. This makes _schedule() safe to call from
9404                 inside exit listeners.
9405                 """
9406                 if self._scheduling:
9407                         return False
9408                 self._scheduling = True
9409                 try:
9410                         return self._schedule_tasks()
9411                 finally:
9412                         self._scheduling = False
9413
9414         def _running_job_count(self):
9415                 return self._jobs
9416
9417         def _can_add_job(self):
9418                 max_jobs = self._max_jobs
9419                 max_load = self._max_load
9420
9421                 if self._max_jobs is not True and \
9422                         self._running_job_count() >= self._max_jobs:
9423                         return False
9424
9425                 if max_load is not None and \
9426                         (max_jobs is True or max_jobs > 1) and \
9427                         self._running_job_count() >= 1:
9428                         try:
9429                                 avg1, avg5, avg15 = getloadavg()
9430                         except OSError:
9431                                 return False
9432
9433                         if avg1 >= max_load:
9434                                 return False
9435
9436                 return True
9437
9438         def _poll(self, timeout=None):
9439                 """
9440                 All poll() calls pass through here. The poll events
9441                 are added directly to self._poll_event_queue.
9442                 In order to avoid endless blocking, this raises
9443                 StopIteration if timeout is None and there are
9444                 no file descriptors to poll.
9445                 """
9446                 if not self._poll_event_handlers:
9447                         self._schedule()
9448                         if timeout is None and \
9449                                 not self._poll_event_handlers:
9450                                 raise StopIteration(
9451                                         "timeout is None and there are no poll() event handlers")
9452
9453                 # The following error is known to occur with Linux kernel versions
9454                 # less than 2.6.24:
9455                 #
9456                 #   select.error: (4, 'Interrupted system call')
9457                 #
9458                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9459                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9460                 # without any events.
9461                 while True:
9462                         try:
9463                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9464                                 break
9465                         except select.error, e:
9466                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9467                                         level=logging.ERROR, noiselevel=-1)
9468                                 del e
9469                                 if timeout is not None:
9470                                         break
9471
9472         def _next_poll_event(self, timeout=None):
9473                 """
9474                 Since the _schedule_wait() loop is called by event
9475                 handlers from _poll_loop(), maintain a central event
9476                 queue for both of them to share events from a single
9477                 poll() call. In order to avoid endless blocking, this
9478                 raises StopIteration if timeout is None and there are
9479                 no file descriptors to poll.
9480                 """
9481                 if not self._poll_event_queue:
9482                         self._poll(timeout)
9483                 return self._poll_event_queue.pop()
9484
9485         def _poll_loop(self):
9486
9487                 event_handlers = self._poll_event_handlers
9488                 event_handled = False
9489
9490                 try:
9491                         while event_handlers:
9492                                 f, event = self._next_poll_event()
9493                                 handler, reg_id = event_handlers[f]
9494                                 handler(f, event)
9495                                 event_handled = True
9496                 except StopIteration:
9497                         event_handled = True
9498
9499                 if not event_handled:
9500                         raise AssertionError("tight loop")
9501
9502         def _schedule_yield(self):
9503                 """
9504                 Schedule for a short period of time chosen by the scheduler based
9505                 on internal state. Synchronous tasks should call this periodically
9506                 in order to allow the scheduler to service pending poll events. The
9507                 scheduler will call poll() at most once, without blocking, and any
9508                 resulting poll events will be serviced.
9509                 """
9510                 event_handlers = self._poll_event_handlers
9511                 events_handled = 0
9512
9513                 if not event_handlers:
9514                         return bool(events_handled)
9515
9516                 if not self._poll_event_queue:
9517                         self._poll(0)
9518
9519                 try:
9520                         while event_handlers and self._poll_event_queue:
9521                                 f, event = self._next_poll_event()
9522                                 handler, reg_id = event_handlers[f]
9523                                 handler(f, event)
9524                                 events_handled += 1
9525                 except StopIteration:
9526                         events_handled += 1
9527
9528                 return bool(events_handled)
9529
9530         def _register(self, f, eventmask, handler):
9531                 """
9532                 @rtype: Integer
9533                 @return: A unique registration id, for use in schedule() or
9534                         unregister() calls.
9535                 """
9536                 if f in self._poll_event_handlers:
9537                         raise AssertionError("fd %d is already registered" % f)
9538                 self._event_handler_id += 1
9539                 reg_id = self._event_handler_id
9540                 self._poll_event_handler_ids[reg_id] = f
9541                 self._poll_event_handlers[f] = (handler, reg_id)
9542                 self._poll_obj.register(f, eventmask)
9543                 return reg_id
9544
9545         def _unregister(self, reg_id):
9546                 f = self._poll_event_handler_ids[reg_id]
9547                 self._poll_obj.unregister(f)
9548                 del self._poll_event_handlers[f]
9549                 del self._poll_event_handler_ids[reg_id]
9550
9551         def _schedule_wait(self, wait_ids):
9552                 """
9553                 Schedule until none of the given registration ids remain
9554                 registered for poll() events.
9555                 @type wait_ids: int or collection of ints
9556                 @param wait_ids: registration id(s) to wait for
9557                 """
9558                 event_handlers = self._poll_event_handlers
9559                 handler_ids = self._poll_event_handler_ids
9560                 event_handled = False
9561
9562                 if isinstance(wait_ids, int):
9563                         wait_ids = frozenset([wait_ids])
9564
9565                 try:
9566                         while wait_ids.intersection(handler_ids):
9567                                 f, event = self._next_poll_event()
9568                                 handler, reg_id = event_handlers[f]
9569                                 handler(f, event)
9570                                 event_handled = True
9571                 except StopIteration:
9572                         event_handled = True
9573
9574                 return event_handled
9575
9576 class QueueScheduler(PollScheduler):
9577
9578         """
9579         Add instances of SequentialTaskQueue and then call run(). The
9580         run() method returns when no tasks remain.
9581         """
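             # A minimal usage sketch (TaskScheduler below wires this up; the
             # max_jobs value is illustrative):
             #
             #   scheduler = QueueScheduler(max_jobs=2)
             #   scheduler.add(task_queue)   # a SequentialTaskQueue
             #   scheduler.run()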
9582
9583         def __init__(self, max_jobs=None, max_load=None):
9584                 PollScheduler.__init__(self)
9585
9586                 if max_jobs is None:
9587                         max_jobs = 1
9588
9589                 self._max_jobs = max_jobs
9590                 self._max_load = max_load
9591                 self.sched_iface = self._sched_iface_class(
9592                         register=self._register,
9593                         schedule=self._schedule_wait,
9594                         unregister=self._unregister)
9595
9596                 self._queues = []
9597                 self._schedule_listeners = []
9598
9599         def add(self, q):
9600                 self._queues.append(q)
9601
9602         def remove(self, q):
9603                 self._queues.remove(q)
9604
9605         def run(self):
9606
9607                 while self._schedule():
9608                         self._poll_loop()
9609
9610                 while self._running_job_count():
9611                         self._poll_loop()
9612
9613         def _schedule_tasks(self):
9614                 """
9615                 @rtype: bool
9616                 @returns: True if there may be remaining tasks to schedule,
9617                         False otherwise.
9618                 """
9619                 while self._can_add_job():
9620                         n = self._max_jobs - self._running_job_count()
9621                         if n < 1:
9622                                 break
9623
9624                         if not self._start_next_job(n):
9625                                 return False
9626
9627                 for q in self._queues:
9628                         if q:
9629                                 return True
9630                 return False
9631
9632         def _running_job_count(self):
9633                 job_count = 0
9634                 for q in self._queues:
9635                         job_count += len(q.running_tasks)
9636                 self._jobs = job_count
9637                 return job_count
9638
9639         def _start_next_job(self, n=1):
9640                 started_count = 0
9641                 for q in self._queues:
9642                         initial_job_count = len(q.running_tasks)
9643                         q.schedule()
9644                         final_job_count = len(q.running_tasks)
9645                         if final_job_count > initial_job_count:
9646                                 started_count += (final_job_count - initial_job_count)
9647                         if started_count >= n:
9648                                 break
9649                 return started_count
9650
9651 class TaskScheduler(object):
9652
9653         """
9654         A simple way to handle scheduling of AsynchronousTask instances. Just
9655         add tasks and call run(). The run() method returns when no tasks remain.
9656         """
9657
9658         def __init__(self, max_jobs=None, max_load=None):
9659                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9660                 self._scheduler = QueueScheduler(
9661                         max_jobs=max_jobs, max_load=max_load)
9662                 self.sched_iface = self._scheduler.sched_iface
9663                 self.run = self._scheduler.run
9664                 self._scheduler.add(self._queue)
9665
9666         def add(self, task):
9667                 self._queue.add(task)
9668
9669 class JobStatusDisplay(object):
9670
9671         _bound_properties = ("curval", "failed", "running")
9672         _jobs_column_width = 48
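             # Assignments to any of the _bound_properties above are intercepted
             # by __setattr__() below, which triggers a display refresh whenever
             # the value actually changes.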
9673
9674         # Don't update the display unless at least this much
9675         # time has passed, in units of seconds.
9676         _min_display_latency = 2
9677
9678         _default_term_codes = {
9679                 'cr'  : '\r',
9680                 'el'  : '\x1b[K',
9681                 'nel' : '\n',
9682         }
9683
9684         _termcap_name_map = {
9685                 'carriage_return' : 'cr',
9686                 'clr_eol'         : 'el',
9687                 'newline'         : 'nel',
9688         }
9689
9690         def __init__(self, out=sys.stdout, quiet=False):
9691                 object.__setattr__(self, "out", out)
9692                 object.__setattr__(self, "quiet", quiet)
9693                 object.__setattr__(self, "maxval", 0)
9694                 object.__setattr__(self, "merges", 0)
9695                 object.__setattr__(self, "_changed", False)
9696                 object.__setattr__(self, "_displayed", False)
9697                 object.__setattr__(self, "_last_display_time", 0)
9698                 object.__setattr__(self, "width", 80)
9699                 self.reset()
9700
9701                 isatty = hasattr(out, "isatty") and out.isatty()
9702                 object.__setattr__(self, "_isatty", isatty)
9703                 if not isatty or not self._init_term():
9704                         term_codes = {}
9705                         for k, capname in self._termcap_name_map.iteritems():
9706                                 term_codes[k] = self._default_term_codes[capname]
9707                         object.__setattr__(self, "_term_codes", term_codes)
9708                 encoding = sys.getdefaultencoding()
9709                 for k, v in self._term_codes.items():
9710                         if not isinstance(v, str):
9711                                 self._term_codes[k] = v.decode(encoding, 'replace')
9712
9713         def _init_term(self):
9714                 """
9715                 Initialize term control codes.
9716                 @rtype: bool
9717                 @returns: True if term codes were successfully initialized,
9718                         False otherwise.
9719                 """
9720
9721                 term_type = os.environ.get("TERM", "vt100")
9722                 tigetstr = None
9723
9724                 try:
9725                         import curses
9726                         try:
9727                                 curses.setupterm(term_type, self.out.fileno())
9728                                 tigetstr = curses.tigetstr
9729                         except curses.error:
9730                                 pass
9731                 except ImportError:
9732                         pass
9733
9734                 if tigetstr is None:
9735                         return False
9736
9737                 term_codes = {}
9738                 for k, capname in self._termcap_name_map.iteritems():
9739                         code = tigetstr(capname)
9740                         if code is None:
9741                                 code = self._default_term_codes[capname]
9742                         term_codes[k] = code
9743                 object.__setattr__(self, "_term_codes", term_codes)
9744                 return True
9745
9746         def _format_msg(self, msg):
9747                 return ">>> %s" % msg
9748
9749         def _erase(self):
9750                 self.out.write(
9751                         self._term_codes['carriage_return'] + \
9752                         self._term_codes['clr_eol'])
9753                 self.out.flush()
9754                 self._displayed = False
9755
9756         def _display(self, line):
9757                 self.out.write(line)
9758                 self.out.flush()
9759                 self._displayed = True
9760
9761         def _update(self, msg):
9762
9763                 out = self.out
9764                 if not self._isatty:
9765                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9766                         self.out.flush()
9767                         self._displayed = True
9768                         return
9769
9770                 if self._displayed:
9771                         self._erase()
9772
9773                 self._display(self._format_msg(msg))
9774
9775         def displayMessage(self, msg):
9776
9777                 was_displayed = self._displayed
9778
9779                 if self._isatty and self._displayed:
9780                         self._erase()
9781
9782                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9783                 self.out.flush()
9784                 self._displayed = False
9785
9786                 if was_displayed:
9787                         self._changed = True
9788                         self.display()
9789
9790         def reset(self):
9791                 self.maxval = 0
9792                 self.merges = 0
9793                 for name in self._bound_properties:
9794                         object.__setattr__(self, name, 0)
9795
9796                 if self._displayed:
9797                         self.out.write(self._term_codes['newline'])
9798                         self.out.flush()
9799                         self._displayed = False
9800
9801         def __setattr__(self, name, value):
9802                 old_value = getattr(self, name)
9803                 if value == old_value:
9804                         return
9805                 object.__setattr__(self, name, value)
9806                 if name in self._bound_properties:
9807                         self._property_change(name, old_value, value)
9808
9809         def _property_change(self, name, old_value, new_value):
9810                 self._changed = True
9811                 self.display()
9812
9813         def _load_avg_str(self):
9814                 try:
9815                         avg = getloadavg()
9816                 except OSError:
9817                         return 'unknown'
9818
9819                 max_avg = max(avg)
9820
9821                 if max_avg < 10:
9822                         digits = 2
9823                 elif max_avg < 100:
9824                         digits = 1
9825                 else:
9826                         digits = 0
9827
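                     # With digits == 2 this yields something like "0.52, 0.48, 0.45".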
9828                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9829
9830         def display(self):
9831                 """
9832                 Display status on stdout, but only if something has
9833                 changed since the last call.
9834                 """
9835
9836                 if self.quiet:
9837                         return
9838
9839                 current_time = time.time()
9840                 time_delta = current_time - self._last_display_time
9841                 if self._displayed and \
9842                         not self._changed:
9843                         if not self._isatty:
9844                                 return
9845                         if time_delta < self._min_display_latency:
9846                                 return
9847
9848                 self._last_display_time = current_time
9849                 self._changed = False
9850                 self._display_status()
9851
9852         def _display_status(self):
9853                 # Don't use len(self._completed_tasks) here since that also
9854                 # can include uninstall tasks.
9855                 curval_str = str(self.curval)
9856                 maxval_str = str(self.maxval)
9857                 running_str = str(self.running)
9858                 failed_str = str(self.failed)
9859                 load_avg_str = self._load_avg_str()
9860
9861                 color_output = StringIO()
9862                 plain_output = StringIO()
9863                 style_file = portage.output.ConsoleStyleFile(color_output)
9864                 style_file.write_listener = plain_output
9865                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9866                 style_writer.style_listener = style_file.new_styles
9867                 f = formatter.AbstractFormatter(style_writer)
9868
9869                 number_style = "INFORM"
9870                 f.add_literal_data("Jobs: ")
9871                 f.push_style(number_style)
9872                 f.add_literal_data(curval_str)
9873                 f.pop_style()
9874                 f.add_literal_data(" of ")
9875                 f.push_style(number_style)
9876                 f.add_literal_data(maxval_str)
9877                 f.pop_style()
9878                 f.add_literal_data(" complete")
9879
9880                 if self.running:
9881                         f.add_literal_data(", ")
9882                         f.push_style(number_style)
9883                         f.add_literal_data(running_str)
9884                         f.pop_style()
9885                         f.add_literal_data(" running")
9886
9887                 if self.failed:
9888                         f.add_literal_data(", ")
9889                         f.push_style(number_style)
9890                         f.add_literal_data(failed_str)
9891                         f.pop_style()
9892                         f.add_literal_data(" failed")
9893
9894                 padding = self._jobs_column_width - len(plain_output.getvalue())
9895                 if padding > 0:
9896                         f.add_literal_data(padding * " ")
9897
9898                 f.add_literal_data("Load avg: ")
9899                 f.add_literal_data(load_avg_str)
9900
9901                 # Truncate to fit width, to avoid making the terminal scroll if the
9902                 # line overflows (happens when the load average is large).
9903                 plain_output = plain_output.getvalue()
9904                 if self._isatty and len(plain_output) > self.width:
9905                         # Use plain_output here since it's easier to truncate
9906                         # properly than the color output which contains console
9907                         # color codes.
9908                         self._update(plain_output[:self.width])
9909                 else:
9910                         self._update(color_output.getvalue())
9911
9912                 xtermTitle(" ".join(plain_output.split()))
9913
9914 class Scheduler(PollScheduler):
9915
9916         _opts_ignore_blockers = \
9917                 frozenset(["--buildpkgonly",
9918                 "--fetchonly", "--fetch-all-uri",
9919                 "--nodeps", "--pretend"])
9920
9921         _opts_no_background = \
9922                 frozenset(["--pretend",
9923                 "--fetchonly", "--fetch-all-uri"])
9924
9925         _opts_no_restart = frozenset(["--buildpkgonly",
9926                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9927
9928         _bad_resume_opts = set(["--ask", "--changelog",
9929                 "--resume", "--skipfirst"])
9930
9931         _fetch_log = "/var/log/emerge-fetch.log"
9932
9933         class _iface_class(SlotObject):
9934                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9935                         "dblinkElog", "fetch", "register", "schedule",
9936                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9937                         "unregister")
9938
9939         class _fetch_iface_class(SlotObject):
9940                 __slots__ = ("log_file", "schedule")
9941
9942         _task_queues_class = slot_dict_class(
9943                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9944
9945         class _build_opts_class(SlotObject):
9946                 __slots__ = ("buildpkg", "buildpkgonly",
9947                         "fetch_all_uri", "fetchonly", "pretend")
9948
9949         class _binpkg_opts_class(SlotObject):
9950                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9951
9952         class _pkg_count_class(SlotObject):
9953                 __slots__ = ("curval", "maxval")
9954
9955         class _emerge_log_class(SlotObject):
9956                 __slots__ = ("xterm_titles",)
9957
9958                 def log(self, *pargs, **kwargs):
9959                         if not self.xterm_titles:
9960                                 # Avoid interference with the scheduler's status display.
9961                                 kwargs.pop("short_msg", None)
9962                         emergelog(self.xterm_titles, *pargs, **kwargs)
9963
9964         class _failed_pkg(SlotObject):
9965                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9966
9967         class _ConfigPool(object):
9968                 """Interface for a task to temporarily allocate a config
9969                 instance from a pool. This allows a task to be constructed
9970                 long before the config instance actually becomes needed, like
9971                 when prefetchers are constructed for the whole merge list."""
9972                 __slots__ = ("_root", "_allocate", "_deallocate")
9973                 def __init__(self, root, allocate, deallocate):
9974                         self._root = root
9975                         self._allocate = allocate
9976                         self._deallocate = deallocate
9977                 def allocate(self):
9978                         return self._allocate(self._root)
9979                 def deallocate(self, settings):
9980                         self._deallocate(settings)
9981
9982         class _unknown_internal_error(portage.exception.PortageException):
9983                 """
9984                 Used internally to terminate scheduling. The specific reason for
9985                 the failure should have been dumped to stderr.
9986                 """
9987                 def __init__(self, value=""):
9988                         portage.exception.PortageException.__init__(self, value)
9989
9990         def __init__(self, settings, trees, mtimedb, myopts,
9991                 spinner, mergelist, favorites, digraph):
9992                 PollScheduler.__init__(self)
9993                 self.settings = settings
9994                 self.target_root = settings["ROOT"]
9995                 self.trees = trees
9996                 self.myopts = myopts
9997                 self._spinner = spinner
9998                 self._mtimedb = mtimedb
9999                 self._mergelist = mergelist
10000                 self._favorites = favorites
10001                 self._args_set = InternalPackageSet(favorites)
10002                 self._build_opts = self._build_opts_class()
10003                 for k in self._build_opts.__slots__:
10004                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10005                 self._binpkg_opts = self._binpkg_opts_class()
10006                 for k in self._binpkg_opts.__slots__:
10007                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
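                      # Each slot name maps to the corresponding command line option,
                      # e.g. the "fetch_all_uri" slot becomes True when "--fetch-all-uri"
                      # appears in myopts.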
10008
10009                 self.curval = 0
10010                 self._logger = self._emerge_log_class()
10011                 self._task_queues = self._task_queues_class()
10012                 for k in self._task_queues.allowed_keys:
10013                         setattr(self._task_queues, k,
10014                                 SequentialTaskQueue())
10015
10016                 # Holds merges that will wait to be executed when no builds are
10017                 # executing. This is useful for system packages since dependencies
10018                 # on system packages are frequently unspecified.
10019                 self._merge_wait_queue = []
10020                 # Holds merges that have been transferred from the merge_wait_queue to
10021                 # the actual merge queue. They are removed from this list upon
10022                 # completion. Other packages can start building only when this list is
10023                 # empty.
10024                 self._merge_wait_scheduled = []
10025
10026                 # Holds system packages and their deep runtime dependencies. Before
10027                 # being merged, these packages go to merge_wait_queue, to be merged
10028                 # when no other packages are building.
10029                 self._deep_system_deps = set()
10030
10031                 # Holds packages to merge which will satisfy currently unsatisfied
10032                 # deep runtime dependencies of system packages. If this is not empty
10033                 # then no parallel builds will be spawned until it is empty. This
10034                 # minimizes the possibility that a build will fail due to the system
10035                 # being in a fragile state. For example, see bug #259954.
10036                 self._unsatisfied_system_deps = set()
10037
10038                 self._status_display = JobStatusDisplay()
10039                 self._max_load = myopts.get("--load-average")
10040                 max_jobs = myopts.get("--jobs")
10041                 if max_jobs is None:
10042                         max_jobs = 1
10043                 self._set_max_jobs(max_jobs)
10044
10045                 # The root where the currently running
10046                 # portage instance is installed.
10047                 self._running_root = trees["/"]["root_config"]
10048                 self.edebug = 0
10049                 if settings.get("PORTAGE_DEBUG", "") == "1":
10050                         self.edebug = 1
10051                 self.pkgsettings = {}
10052                 self._config_pool = {}
10053                 self._blocker_db = {}
10054                 for root in trees:
10055                         self._config_pool[root] = []
10056                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10057
10058                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10059                         schedule=self._schedule_fetch)
10060                 self._sched_iface = self._iface_class(
10061                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10062                         dblinkDisplayMerge=self._dblink_display_merge,
10063                         dblinkElog=self._dblink_elog,
10064                         fetch=fetch_iface, register=self._register,
10065                         schedule=self._schedule_wait,
10066                         scheduleSetup=self._schedule_setup,
10067                         scheduleUnpack=self._schedule_unpack,
10068                         scheduleYield=self._schedule_yield,
10069                         unregister=self._unregister)
10070
10071                 self._prefetchers = weakref.WeakValueDictionary()
10072                 self._pkg_queue = []
10073                 self._completed_tasks = set()
10074
10075                 self._failed_pkgs = []
10076                 self._failed_pkgs_all = []
10077                 self._failed_pkgs_die_msgs = []
10078                 self._post_mod_echo_msgs = []
10079                 self._parallel_fetch = False
10080                 merge_count = len([x for x in mergelist \
10081                         if isinstance(x, Package) and x.operation == "merge"])
10082                 self._pkg_count = self._pkg_count_class(
10083                         curval=0, maxval=merge_count)
10084                 self._status_display.maxval = self._pkg_count.maxval
10085
10086                 # The load average takes some time to respond when new
10087                 # jobs are added, so we need to limit the rate of adding
10088                 # new jobs.
10089                 self._job_delay_max = 10
10090                 self._job_delay_factor = 1.0
10091                 self._job_delay_exp = 1.5
10092                 self._previous_job_start_time = None
10093
10094                 self._set_digraph(digraph)
10095
10096                 # This is used to memoize the _choose_pkg() result when
10097                 # no packages can be chosen until one of the existing
10098                 # jobs completes.
10099                 self._choose_pkg_return_early = False
10100
10101                 features = self.settings.features
10102                 if "parallel-fetch" in features and \
10103                         not ("--pretend" in self.myopts or \
10104                         "--fetch-all-uri" in self.myopts or \
10105                         "--fetchonly" in self.myopts):
10106                         if "distlocks" not in features:
10107                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10108                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10109                                         "requires the distlocks feature to be enabled"+"\n",
10110                                         noiselevel=-1)
10111                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10112                                         "so parallel-fetching has been disabled"+"\n",
10113                                         noiselevel=-1)
10114                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10115                         elif len(mergelist) > 1:
10116                                 self._parallel_fetch = True
10117
10118                 if self._parallel_fetch:
10119                         # clear out (truncate) the existing fetch log if it exists
10120                         try:
10121                                 open(self._fetch_log, 'w').close()
10122                         except EnvironmentError:
10123                                 pass
10124
10125                 self._running_portage = None
10126                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10127                         portage.const.PORTAGE_PACKAGE_ATOM)
10128                 if portage_match:
10129                         cpv = portage_match.pop()
10130                         self._running_portage = self._pkg(cpv, "installed",
10131                                 self._running_root, installed=True)
10132
10133         def _poll(self, timeout=None):
10134                 self._schedule()
10135                 PollScheduler._poll(self, timeout=timeout)
10136
10137         def _set_max_jobs(self, max_jobs):
10138                 self._max_jobs = max_jobs
10139                 self._task_queues.jobs.max_jobs = max_jobs
10140
10141         def _background_mode(self):
10142                 """
10143                 Check if background mode is enabled and adjust states as necessary.
10144
10145                 @rtype: bool
10146                 @returns: True if background mode is enabled, False otherwise.
10147                 """
10148                 background = (self._max_jobs is True or \
10149                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10150                         not bool(self._opts_no_background.intersection(self.myopts))
10151
10152                 if background:
10153                         interactive_tasks = self._get_interactive_tasks()
10154                         if interactive_tasks:
10155                                 background = False
10156                                 writemsg_level(">>> Sending package output to stdio due " + \
10157                                         "to interactive package(s):\n",
10158                                         level=logging.INFO, noiselevel=-1)
10159                                 msg = [""]
10160                                 for pkg in interactive_tasks:
10161                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10162                                         if pkg.root != "/":
10163                                                 pkg_str += " for " + pkg.root
10164                                         msg.append(pkg_str)
10165                                 msg.append("")
10166                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10167                                         level=logging.INFO, noiselevel=-1)
10168                                 if self._max_jobs is True or self._max_jobs > 1:
10169                                         self._set_max_jobs(1)
10170                                         writemsg_level(">>> Setting --jobs=1 due " + \
10171                                                 "to the above interactive package(s)\n",
10172                                                 level=logging.INFO, noiselevel=-1)
10173
10174                 self._status_display.quiet = \
10175                         not background or \
10176                         ("--quiet" in self.myopts and \
10177                         "--verbose" not in self.myopts)
10178
10179                 self._logger.xterm_titles = \
10180                         "notitles" not in self.settings.features and \
10181                         self._status_display.quiet
10182
10183                 return background
10184
10185         def _get_interactive_tasks(self):
10186                 from portage import flatten
10187                 from portage.dep import use_reduce, paren_reduce
10188                 interactive_tasks = []
10189                 for task in self._mergelist:
10190                         if not (isinstance(task, Package) and \
10191                                 task.operation == "merge"):
10192                                 continue
10193                         try:
10194                                 properties = flatten(use_reduce(paren_reduce(
10195                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10196                         except portage.exception.InvalidDependString, e:
10197                                 show_invalid_depstring_notice(task,
10198                                         task.metadata["PROPERTIES"], str(e))
10199                                 raise self._unknown_internal_error()
10200                         if "interactive" in properties:
10201                                 interactive_tasks.append(task)
10202                 return interactive_tasks
10203
10204         def _set_digraph(self, digraph):
10205                 if "--nodeps" in self.myopts or \
10206                         (self._max_jobs is not True and self._max_jobs < 2):
10207                         # save some memory
10208                         self._digraph = None
10209                         return
10210
10211                 self._digraph = digraph
10212                 self._find_system_deps()
10213                 self._prune_digraph()
10214                 self._prevent_builddir_collisions()
10215
10216         def _find_system_deps(self):
10217                 """
10218                 Find system packages and their deep runtime dependencies. Before being
10219                 merged, these packages go to merge_wait_queue, to be merged when no
10220                 other packages are building.
10221                 """
10222                 deep_system_deps = self._deep_system_deps
10223                 deep_system_deps.clear()
10224                 deep_system_deps.update(
10225                         _find_deep_system_runtime_deps(self._digraph))
10226                 deep_system_deps.difference_update([pkg for pkg in \
10227                         deep_system_deps if pkg.operation != "merge"])
10228
10229         def _prune_digraph(self):
10230                 """
10231                 Prune any root nodes that are irrelevant.
10232                 """
10233
10234                 graph = self._digraph
10235                 completed_tasks = self._completed_tasks
10236                 removed_nodes = set()
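                      # Repeatedly strip prunable root nodes; removing a root can expose
                      # new prunable roots, so loop until a pass removes nothing.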
10237                 while True:
10238                         for node in graph.root_nodes():
10239                                 if not isinstance(node, Package) or \
10240                                         (node.installed and node.operation == "nomerge") or \
10241                                         node.onlydeps or \
10242                                         node in completed_tasks:
10243                                         removed_nodes.add(node)
10244                         if removed_nodes:
10245                                 graph.difference_update(removed_nodes)
10246                         if not removed_nodes:
10247                                 break
10248                         removed_nodes.clear()
10249
10250         def _prevent_builddir_collisions(self):
10251                 """
10252                 When building stages, sometimes the same exact cpv needs to be merged
10253                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10254                 in the builddir. Currently, normal file locks would be inappropriate
10255                 for this purpose since emerge holds all of its build dir locks from
10256                 the main process.
10257                 """
10258                 cpv_map = {}
10259                 for pkg in self._mergelist:
10260                         if not isinstance(pkg, Package):
10261                                 # a satisfied blocker
10262                                 continue
10263                         if pkg.installed:
10264                                 continue
10265                         if pkg.cpv not in cpv_map:
10266                                 cpv_map[pkg.cpv] = [pkg]
10267                                 continue
10268                         for earlier_pkg in cpv_map[pkg.cpv]:
10269                                 self._digraph.add(earlier_pkg, pkg,
10270                                         priority=DepPriority(buildtime=True))
10271                         cpv_map[pkg.cpv].append(pkg)
10272
10273         class _pkg_failure(portage.exception.PortageException):
10274                 """
10275                 An instance of this class is raised by unmerge() when
10276                 an uninstallation fails.
10277                 """
10278                 status = 1
10279                 def __init__(self, *pargs):
10280                         portage.exception.PortageException.__init__(self, pargs)
10281                         if pargs:
10282                                 self.status = pargs[0]
10283
10284         def _schedule_fetch(self, fetcher):
10285                 """
10286                 Schedule a fetcher on the fetch queue, in order to
10287                 serialize access to the fetch log.
10288                 """
10289                 self._task_queues.fetch.addFront(fetcher)
10290
10291         def _schedule_setup(self, setup_phase):
10292                 """
10293                 Schedule a setup phase on the merge queue, in order to
10294                 serialize unsandboxed access to the live filesystem.
10295                 """
10296                 self._task_queues.merge.addFront(setup_phase)
10297                 self._schedule()
10298
10299         def _schedule_unpack(self, unpack_phase):
10300                 """
10301                 Schedule an unpack phase on the unpack queue, in order
10302                 to serialize $DISTDIR access for live ebuilds.
10303                 """
10304                 self._task_queues.unpack.add(unpack_phase)
10305
10306         def _find_blockers(self, new_pkg):
10307                 """
10308                 Returns a callable which should be called only when
10309                 the vdb lock has been acquired.
10310                 """
10311                 def get_blockers():
10312                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10313                 return get_blockers
10314
10315         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10316                 if self._opts_ignore_blockers.intersection(self.myopts):
10317                         return None
10318
10319                 # Call gc.collect() here to avoid heap overflow that
10320                 # triggers 'Cannot allocate memory' errors (reported
10321                 # with python-2.5).
10322                 import gc
10323                 gc.collect()
10324
10325                 blocker_db = self._blocker_db[new_pkg.root]
10326
10327                 blocker_dblinks = []
10328                 for blocking_pkg in blocker_db.findInstalledBlockers(
10329                         new_pkg, acquire_lock=acquire_lock):
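                              # Packages in the same slot, or with an identical cpv, are
                              # handled as straight replacements rather than treated as
                              # blockers here.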
10330                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10331                                 continue
10332                         if new_pkg.cpv == blocking_pkg.cpv:
10333                                 continue
10334                         blocker_dblinks.append(portage.dblink(
10335                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10336                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10337                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10338
10339                 gc.collect()
10340
10341                 return blocker_dblinks
10342
10343         def _dblink_pkg(self, pkg_dblink):
10344                 cpv = pkg_dblink.mycpv
10345                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10346                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10347                 installed = type_name == "installed"
10348                 return self._pkg(cpv, type_name, root_config, installed=installed)
10349
10350         def _append_to_log_path(self, log_path, msg):
10351                 f = open(log_path, 'a')
10352                 try:
10353                         f.write(msg)
10354                 finally:
10355                         f.close()
10356
10357         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10358
10359                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10360                 log_file = None
10361                 out = sys.stdout
10362                 background = self._background
10363
10364                 if background and log_path is not None:
10365                         log_file = open(log_path, 'a')
10366                         out = log_file
10367
10368                 try:
10369                         for msg in msgs:
10370                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10371                 finally:
10372                         if log_file is not None:
10373                                 log_file.close()
10374
10375         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10376                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10377                 background = self._background
10378
10379                 if log_path is None:
10380                         if not (background and level < logging.WARN):
10381                                 portage.util.writemsg_level(msg,
10382                                         level=level, noiselevel=noiselevel)
10383                 else:
10384                         if not background:
10385                                 portage.util.writemsg_level(msg,
10386                                         level=level, noiselevel=noiselevel)
10387                         self._append_to_log_path(log_path, msg)
10388
10389         def _dblink_ebuild_phase(self,
10390                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10391                 """
10392                 Using this callback for merge phases allows the scheduler
10393                 to run while these phases execute asynchronously, and allows
10394                 the scheduler to control output handling.
10395                 """
10396
10397                 scheduler = self._sched_iface
10398                 settings = pkg_dblink.settings
10399                 pkg = self._dblink_pkg(pkg_dblink)
10400                 background = self._background
10401                 log_path = settings.get("PORTAGE_LOG_FILE")
10402
10403                 ebuild_phase = EbuildPhase(background=background,
10404                         pkg=pkg, phase=phase, scheduler=scheduler,
10405                         settings=settings, tree=pkg_dblink.treetype)
10406                 ebuild_phase.start()
10407                 ebuild_phase.wait()
10408
10409                 return ebuild_phase.returncode
10410
10411         def _check_manifests(self):
10412                 # Verify all the manifests now so that the user is notified of failure
10413                 # as soon as possible.
10414                 if "strict" not in self.settings.features or \
10415                         "--fetchonly" in self.myopts or \
10416                         "--fetch-all-uri" in self.myopts:
10417                         return os.EX_OK
10418
10419                 shown_verifying_msg = False
10420                 quiet_settings = {}
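                      # Clone each root's config with PORTAGE_QUIET enabled so that
                      # digestcheck() output stays terse during verification.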
10421                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10422                         quiet_config = portage.config(clone=pkgsettings)
10423                         quiet_config["PORTAGE_QUIET"] = "1"
10424                         quiet_config.backup_changes("PORTAGE_QUIET")
10425                         quiet_settings[myroot] = quiet_config
10426                         del quiet_config
10427
10428                 for x in self._mergelist:
10429                         if not isinstance(x, Package) or \
10430                                 x.type_name != "ebuild":
10431                                 continue
10432
10433                         if not shown_verifying_msg:
10434                                 shown_verifying_msg = True
10435                                 self._status_msg("Verifying ebuild manifests")
10436
10437                         root_config = x.root_config
10438                         portdb = root_config.trees["porttree"].dbapi
10439                         quiet_config = quiet_settings[root_config.root]
10440                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10441                         if not portage.digestcheck([], quiet_config, strict=True):
10442                                 return 1
10443
10444                 return os.EX_OK
10445
10446         def _add_prefetchers(self):
10447
10448                 if not self._parallel_fetch:
10449                         return
10450
10451                 if self._parallel_fetch:
10452                         self._status_msg("Starting parallel fetch")
10453
10454                         prefetchers = self._prefetchers
10455                         getbinpkg = "--getbinpkg" in self.myopts
10456
10457                         # In order to avoid "waiting for lock" messages
10458                         # at the beginning, which annoy users, never
10459                         # spawn a prefetcher for the first package.
10460                         for pkg in self._mergelist[1:]:
10461                                 prefetcher = self._create_prefetcher(pkg)
10462                                 if prefetcher is not None:
10463                                         self._task_queues.fetch.add(prefetcher)
10464                                         prefetchers[pkg] = prefetcher
10465
10466         def _create_prefetcher(self, pkg):
10467                 """
10468                 @return: a prefetcher, or None if not applicable
10469                 """
10470                 prefetcher = None
10471
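                      # Only source ebuilds and remote binary packages benefit from
                      # prefetching; anything else leaves prefetcher as None.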
10472                 if not isinstance(pkg, Package):
10473                         pass
10474
10475                 elif pkg.type_name == "ebuild":
10476
10477                         prefetcher = EbuildFetcher(background=True,
10478                                 config_pool=self._ConfigPool(pkg.root,
10479                                 self._allocate_config, self._deallocate_config),
10480                                 fetchonly=1, logfile=self._fetch_log,
10481                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10482
10483                 elif pkg.type_name == "binary" and \
10484                         "--getbinpkg" in self.myopts and \
10485                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10486
10487                         prefetcher = BinpkgPrefetcher(background=True,
10488                                 pkg=pkg, scheduler=self._sched_iface)
10489
10490                 return prefetcher
10491
10492         def _is_restart_scheduled(self):
10493                 """
10494                 Check if the merge list contains a replacement
10495                 for the currently running instance that will result
10496                 in a restart after the merge.
10497                 @rtype: bool
10498                 @returns: True if a restart is scheduled, False otherwise.
10499                 """
10500                 if self._opts_no_restart.intersection(self.myopts):
10501                         return False
10502
10503                 mergelist = self._mergelist
10504
10505                 for i, pkg in enumerate(mergelist):
10506                         if self._is_restart_necessary(pkg) and \
10507                                 i != len(mergelist) - 1:
10508                                 return True
10509
10510                 return False
10511
10512         def _is_restart_necessary(self, pkg):
10513                 """
10514                 @return: True if merging the given package
10515                         requires restart, False otherwise.
10516                 """
10517
10518                 # Figure out if we need a restart.
10519                 if pkg.root == self._running_root.root and \
10520                         portage.match_from_list(
10521                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10522                         if self._running_portage:
10523                                 return pkg.cpv != self._running_portage.cpv
10524                         return True
10525                 return False
10526
10527         def _restart_if_necessary(self, pkg):
10528                 """
10529                 Use execv() to restart emerge. This happens
10530                 if portage upgrades itself and there are
10531                 remaining packages in the list.
10532                 """
10533
10534                 if self._opts_no_restart.intersection(self.myopts):
10535                         return
10536
10537                 if not self._is_restart_necessary(pkg):
10538                         return
10539
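                      # If portage itself is the last package in the list there is
                      # nothing left to merge afterwards, so no restart is needed.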
10540                 if pkg == self._mergelist[-1]:
10541                         return
10542
10543                 self._main_loop_cleanup()
10544
10545                 logger = self._logger
10546                 pkg_count = self._pkg_count
10547                 mtimedb = self._mtimedb
10548                 bad_resume_opts = self._bad_resume_opts
10549
10550                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10551                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10552
10553                 logger.log(" *** RESTARTING " + \
10554                         "emerge via exec() after change of " + \
10555                         "portage version.")
10556
10557                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10558                 mtimedb.commit()
10559                 portage.run_exitfuncs()
10560                 mynewargv = [sys.argv[0], "--resume"]
10561                 resume_opts = self.myopts.copy()
10562                 # For automatic resume, we need to prevent
10563                 # any of bad_resume_opts from leaking in
10564                 # via EMERGE_DEFAULT_OPTS.
10565                 resume_opts["--ignore-default-opts"] = True
10566                 for myopt, myarg in resume_opts.iteritems():
10567                         if myopt not in bad_resume_opts:
10568                                 if myarg is True:
10569                                         mynewargv.append(myopt)
10570                                 else:
10571                                         mynewargv.append(myopt +"="+ str(myarg))
10572                 # priority only needs to be adjusted on the first run
10573                 os.environ["PORTAGE_NICENESS"] = "0"
10574                 os.execv(mynewargv[0], mynewargv)
10575
10576         def merge(self):
10577
10578                 if "--resume" in self.myopts:
10579                         # We're resuming.
10580                         portage.writemsg_stdout(
10581                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10582                         self._logger.log(" *** Resuming merge...")
10583
10584                 self._save_resume_list()
10585
10586                 try:
10587                         self._background = self._background_mode()
10588                 except self._unknown_internal_error:
10589                         return 1
10590
10591                 for root in self.trees:
10592                         root_config = self.trees[root]["root_config"]
10593
10594                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10595                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10596                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10597                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10598                         if not tmpdir or not os.path.isdir(tmpdir):
10599                                 msg = "The directory specified in your " + \
10600                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10601                                         "does not exist. Please create this " + \
10602                                         "directory or correct your PORTAGE_TMPDIR setting."
10603                                 msg = textwrap.wrap(msg, 70)
10604                                 out = portage.output.EOutput()
10605                                 for l in msg:
10606                                         out.eerror(l)
10607                                 return 1
10608
10609                         if self._background:
10610                                 root_config.settings.unlock()
10611                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10612                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10613                                 root_config.settings.lock()
10614
10615                         self.pkgsettings[root] = portage.config(
10616                                 clone=root_config.settings)
10617
10618                 rval = self._check_manifests()
10619                 if rval != os.EX_OK:
10620                         return rval
10621
10622                 keep_going = "--keep-going" in self.myopts
10623                 fetchonly = self._build_opts.fetchonly
10624                 mtimedb = self._mtimedb
10625                 failed_pkgs = self._failed_pkgs
10626
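                      # With --keep-going, drop failed packages from the resume list,
                      # recalculate the dependency graph and try again; otherwise a
                      # single pass through _merge() is enough.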
10627                 while True:
10628                         rval = self._merge()
10629                         if rval == os.EX_OK or fetchonly or not keep_going:
10630                                 break
10631                         if "resume" not in mtimedb:
10632                                 break
10633                         mergelist = self._mtimedb["resume"].get("mergelist")
10634                         if not mergelist:
10635                                 break
10636
10637                         if not failed_pkgs:
10638                                 break
10639
10640                         for failed_pkg in failed_pkgs:
10641                                 mergelist.remove(list(failed_pkg.pkg))
10642
10643                         self._failed_pkgs_all.extend(failed_pkgs)
10644                         del failed_pkgs[:]
10645
10646                         if not mergelist:
10647                                 break
10648
10649                         if not self._calc_resume_list():
10650                                 break
10651
10652                         clear_caches(self.trees)
10653                         if not self._mergelist:
10654                                 break
10655
10656                         self._save_resume_list()
10657                         self._pkg_count.curval = 0
10658                         self._pkg_count.maxval = len([x for x in self._mergelist \
10659                                 if isinstance(x, Package) and x.operation == "merge"])
10660                         self._status_display.maxval = self._pkg_count.maxval
10661
10662                 self._logger.log(" *** Finished. Cleaning up...")
10663
10664                 if failed_pkgs:
10665                         self._failed_pkgs_all.extend(failed_pkgs)
10666                         del failed_pkgs[:]
10667
10668                 background = self._background
10669                 failure_log_shown = False
10670                 if background and len(self._failed_pkgs_all) == 1:
10671                         # If only one package failed then just show its
10672                         # whole log for easy viewing.
10673                         failed_pkg = self._failed_pkgs_all[-1]
10674                         build_dir = failed_pkg.build_dir
10675                         log_file = None
10676
10677                         log_paths = [failed_pkg.build_log]
10678
10679                         log_path = self._locate_failure_log(failed_pkg)
10680                         if log_path is not None:
10681                                 try:
10682                                         log_file = open(log_path)
10683                                 except IOError:
10684                                         pass
10685
10686                         if log_file is not None:
10687                                 try:
10688                                         for line in log_file:
10689                                                 writemsg_level(line, noiselevel=-1)
10690                                 finally:
10691                                         log_file.close()
10692                                 failure_log_shown = True
10693
10694                 # Dump mod_echo output now since it tends to flood the terminal.
10695                 # This allows us to avoid having more important output, generated
10696                 # later, from being swept away by the mod_echo output.
10697                 mod_echo_output = _flush_elog_mod_echo()
10698
10699                 if background and not failure_log_shown and \
10700                         self._failed_pkgs_all and \
10701                         self._failed_pkgs_die_msgs and \
10702                         not mod_echo_output:
10703
10704                         printer = portage.output.EOutput()
10705                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10706                                 root_msg = ""
10707                                 if mysettings["ROOT"] != "/":
10708                                         root_msg = " merged to %s" % mysettings["ROOT"]
10709                                 print
10710                                 printer.einfo("Error messages for package %s%s:" % \
10711                                         (colorize("INFORM", key), root_msg))
10712                                 print
10713                                 for phase in portage.const.EBUILD_PHASES:
10714                                         if phase not in logentries:
10715                                                 continue
10716                                         for msgtype, msgcontent in logentries[phase]:
10717                                                 if isinstance(msgcontent, basestring):
10718                                                         msgcontent = [msgcontent]
10719                                                 for line in msgcontent:
10720                                                         printer.eerror(line.strip("\n"))
10721
10722                 if self._post_mod_echo_msgs:
10723                         for msg in self._post_mod_echo_msgs:
10724                                 msg()
10725
10726                 if len(self._failed_pkgs_all) > 1 or \
10727                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10728                         if len(self._failed_pkgs_all) > 1:
10729                                 msg = "The following %d packages have " % \
10730                                         len(self._failed_pkgs_all) + \
10731                                         "failed to build or install:"
10732                         else:
10733                                 msg = "The following package has " + \
10734                                         "failed to build or install:"
10735                         prefix = bad(" * ")
10736                         writemsg(prefix + "\n", noiselevel=-1)
10737                         from textwrap import wrap
10738                         for line in wrap(msg, 72):
10739                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10740                         writemsg(prefix + "\n", noiselevel=-1)
10741                         for failed_pkg in self._failed_pkgs_all:
10742                                 writemsg("%s\t%s\n" % (prefix,
10743                                         colorize("INFORM", str(failed_pkg.pkg))),
10744                                         noiselevel=-1)
10745                         writemsg(prefix + "\n", noiselevel=-1)
10746
10747                 return rval
10748
10749         def _elog_listener(self, mysettings, key, logentries, fulltext):
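                      # Collect ERROR-level elog entries as they are emitted so that
                      # merge() can re-display them in its failure summary.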
10750                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10751                 if errors:
10752                         self._failed_pkgs_die_msgs.append(
10753                                 (mysettings, key, errors))
10754
10755         def _locate_failure_log(self, failed_pkg):
10756
10757                 build_dir = failed_pkg.build_dir
10758                 log_file = None
10759
10760                 log_paths = [failed_pkg.build_log]
10761
10762                 for log_path in log_paths:
10763                         if not log_path:
10764                                 continue
10765
10766                         try:
10767                                 log_size = os.stat(log_path).st_size
10768                         except OSError:
10769                                 continue
10770
10771                         if log_size == 0:
10772                                 continue
10773
10774                         return log_path
10775
10776                 return None
10777
10778         def _add_packages(self):
10779                 pkg_queue = self._pkg_queue
10780                 for pkg in self._mergelist:
10781                         if isinstance(pkg, Package):
10782                                 pkg_queue.append(pkg)
10783                         elif isinstance(pkg, Blocker):
10784                                 pass
10785
10786         def _system_merge_started(self, merge):
10787                 """
10788                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10789                 """
10790                 graph = self._digraph
10791                 if graph is None:
10792                         return
10793                 pkg = merge.merge.pkg
10794
10795                 # Skip this if $ROOT != / since it shouldn't matter if there
10796                 # are unsatisfied system runtime deps in this case.
10797                 if pkg.root != '/':
10798                         return
10799
10800                 completed_tasks = self._completed_tasks
10801                 unsatisfied = self._unsatisfied_system_deps
10802
10803                 def ignore_non_runtime_or_satisfied(priority):
10804                         """
10805                         Ignore non-runtime and satisfied runtime priorities.
10806                         """
10807                         if isinstance(priority, DepPriority) and \
10808                                 not priority.satisfied and \
10809                                 (priority.runtime or priority.runtime_post):
10810                                 return False
10811                         return True
10812
10813                 # When checking for unsatisfied runtime deps, only check
10814                 # direct deps since indirect deps are checked when the
10815                 # corresponding parent is merged.
10816                 for child in graph.child_nodes(pkg,
10817                         ignore_priority=ignore_non_runtime_or_satisfied):
10818                         if not isinstance(child, Package) or \
10819                                 child.operation == 'uninstall':
10820                                 continue
10821                         if child is pkg:
10822                                 continue
10823                         if child.operation == 'merge' and \
10824                                 child not in completed_tasks:
10825                                 unsatisfied.add(child)
10826
10827         def _merge_wait_exit_handler(self, task):
10828                 self._merge_wait_scheduled.remove(task)
10829                 self._merge_exit(task)
10830
10831         def _merge_exit(self, merge):
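                      # Record the merge result, return the temporary config to the
                      # pool, update the status display and schedule more work.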
10832                 self._do_merge_exit(merge)
10833                 self._deallocate_config(merge.merge.settings)
10834                 if merge.returncode == os.EX_OK and \
10835                         not merge.merge.pkg.installed:
10836                         self._status_display.curval += 1
10837                 self._status_display.merges = len(self._task_queues.merge)
10838                 self._schedule()
10839
10840         def _do_merge_exit(self, merge):
10841                 pkg = merge.merge.pkg
10842                 if merge.returncode != os.EX_OK:
10843                         settings = merge.merge.settings
10844                         build_dir = settings.get("PORTAGE_BUILDDIR")
10845                         build_log = settings.get("PORTAGE_LOG_FILE")
10846
10847                         self._failed_pkgs.append(self._failed_pkg(
10848                                 build_dir=build_dir, build_log=build_log,
10849                                 pkg=pkg,
10850                                 returncode=merge.returncode))
10851                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10852
10853                         self._status_display.failed = len(self._failed_pkgs)
10854                         return
10855
10856                 self._task_complete(pkg)
10857                 pkg_to_replace = merge.merge.pkg_to_replace
10858                 if pkg_to_replace is not None:
10859                         # When a package is replaced, mark its uninstall
10860                         # task complete (if any).
10861                         uninst_hash_key = \
10862                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10863                         self._task_complete(uninst_hash_key)
10864
10865                 if pkg.installed:
10866                         return
10867
10868                 self._restart_if_necessary(pkg)
10869
10870                 # Call mtimedb.commit() after each merge so that
10871                 # --resume still works after being interrupted
10872                 # by reboot, sigkill or similar.
10873                 mtimedb = self._mtimedb
10874                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10875                 if not mtimedb["resume"]["mergelist"]:
10876                         del mtimedb["resume"]
10877                 mtimedb.commit()
10878
10879         def _build_exit(self, build):
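                      # A successful build is handed off to the merge queue (or to the
                      # merge_wait_queue for deep system deps); a failed build is
                      # recorded instead. Either way the job slot is released below.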
10880                 if build.returncode == os.EX_OK:
10881                         self.curval += 1
10882                         merge = PackageMerge(merge=build)
10883                         if not build.build_opts.buildpkgonly and \
10884                                 build.pkg in self._deep_system_deps:
10885                                 # Since dependencies on system packages are frequently
10886                                 # unspecified, merge them only when no builds are executing.
10887                                 self._merge_wait_queue.append(merge)
10888                                 merge.addStartListener(self._system_merge_started)
10889                         else:
10890                                 merge.addExitListener(self._merge_exit)
10891                                 self._task_queues.merge.add(merge)
10892                                 self._status_display.merges = len(self._task_queues.merge)
10893                 else:
10894                         settings = build.settings
10895                         build_dir = settings.get("PORTAGE_BUILDDIR")
10896                         build_log = settings.get("PORTAGE_LOG_FILE")
10897
10898                         self._failed_pkgs.append(self._failed_pkg(
10899                                 build_dir=build_dir, build_log=build_log,
10900                                 pkg=build.pkg,
10901                                 returncode=build.returncode))
10902                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10903
10904                         self._status_display.failed = len(self._failed_pkgs)
10905                         self._deallocate_config(build.settings)
10906                 self._jobs -= 1
10907                 self._status_display.running = self._jobs
10908                 self._schedule()
10909
10910         def _extract_exit(self, build):
10911                 self._build_exit(build)
10912
10913         def _task_complete(self, pkg):
10914                 self._completed_tasks.add(pkg)
10915                 self._unsatisfied_system_deps.discard(pkg)
10916                 self._choose_pkg_return_early = False
10917
10918         def _merge(self):
10919
10920                 self._add_prefetchers()
10921                 self._add_packages()
10922                 pkg_queue = self._pkg_queue
10923                 failed_pkgs = self._failed_pkgs
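                      # Silence lock "waiting" messages in background mode and route
                      # elog output through our listener; both module-level hooks are
                      # restored in the finally block below.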
10924                 portage.locks._quiet = self._background
10925                 portage.elog._emerge_elog_listener = self._elog_listener
10926                 rval = os.EX_OK
10927
10928                 try:
10929                         self._main_loop()
10930                 finally:
10931                         self._main_loop_cleanup()
10932                         portage.locks._quiet = False
10933                         portage.elog._emerge_elog_listener = None
10934                         if failed_pkgs:
10935                                 rval = failed_pkgs[-1].returncode
10936
10937                 return rval
10938
10939         def _main_loop_cleanup(self):
10940                 del self._pkg_queue[:]
10941                 self._completed_tasks.clear()
10942                 self._deep_system_deps.clear()
10943                 self._unsatisfied_system_deps.clear()
10944                 self._choose_pkg_return_early = False
10945                 self._status_display.reset()
10946                 self._digraph = None
10947                 self._task_queues.fetch.clear()
10948
10949         def _choose_pkg(self):
10950                 """
10951                 Choose a task that has all its dependencies satisfied.
10952                 """
10953
10954                 if self._choose_pkg_return_early:
10955                         return None
10956
10957                 if self._digraph is None:
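                              # Without a dependency graph we cannot verify that a
                              # package's deps have been merged, so unless --nodeps was
                              # combined with parallel jobs, wait for the running tasks
                              # to finish before popping the next package.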
10958                         if (self._jobs or self._task_queues.merge) and \
10959                                 not ("--nodeps" in self.myopts and \
10960                                 (self._max_jobs is True or self._max_jobs > 1)):
10961                                 self._choose_pkg_return_early = True
10962                                 return None
10963                         return self._pkg_queue.pop(0)
10964
10965                 if not (self._jobs or self._task_queues.merge):
10966                         return self._pkg_queue.pop(0)
10967
10968                 self._prune_digraph()
10969
10970                 chosen_pkg = None
10971                 later = set(self._pkg_queue)
10972                 for pkg in self._pkg_queue:
10973                         later.remove(pkg)
10974                         if not self._dependent_on_scheduled_merges(pkg, later):
10975                                 chosen_pkg = pkg
10976                                 break
10977
10978                 if chosen_pkg is not None:
10979                         self._pkg_queue.remove(chosen_pkg)
10980
10981                 if chosen_pkg is None:
10982                         # There's no point in searching for a package to
10983                         # choose until at least one of the existing jobs
10984                         # completes.
10985                         self._choose_pkg_return_early = True
10986
10987                 return chosen_pkg
10988
10989         def _dependent_on_scheduled_merges(self, pkg, later):
10990                 """
10991                 Traverse the subgraph of the given package's deep dependencies
10992                 to see if it contains any scheduled merges.
10993                 @param pkg: a package to check dependencies for
10994                 @type pkg: Package
10995                 @param later: packages for which dependence should be ignored
10996                         since they will be merged later than pkg anyway and therefore
10997                         delaying the merge of pkg will not result in a more optimal
10998                         merge order
10999                 @type later: set
11000                 @rtype: bool
11001                 @returns: True if the package is dependent, False otherwise.
11002                 """
11003
11004                 graph = self._digraph
11005                 completed_tasks = self._completed_tasks
11006
11007                 dependent = False
11008                 traversed_nodes = set([pkg])
11009                 direct_deps = graph.child_nodes(pkg)
11010                 node_stack = direct_deps
11011                 direct_deps = frozenset(direct_deps)
11012                 while node_stack:
11013                         node = node_stack.pop()
11014                         if node in traversed_nodes:
11015                                 continue
11016                         traversed_nodes.add(node)
11017                         if not ((node.installed and node.operation == "nomerge") or \
11018                                 (node.operation == "uninstall" and \
11019                                 node not in direct_deps) or \
11020                                 node in completed_tasks or \
11021                                 node in later):
11022                                 dependent = True
11023                                 break
11024                         node_stack.extend(graph.child_nodes(node))
11025
11026                 return dependent
11027
11028         def _allocate_config(self, root):
11029                 """
11030                 Allocate a unique config instance for a task in order
11031                 to prevent interference between parallel tasks.
11032                 """
11033                 if self._config_pool[root]:
11034                         temp_settings = self._config_pool[root].pop()
11035                 else:
11036                         temp_settings = portage.config(clone=self.pkgsettings[root])
11037                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11038                 # performance reasons, call it here to make sure all settings from the
11039                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11040                 temp_settings.reload()
11041                 temp_settings.reset()
11042                 return temp_settings
11043
11044         def _deallocate_config(self, settings):
11045                 self._config_pool[settings["ROOT"]].append(settings)
11046
11047         def _main_loop(self):
11048
11049                 # Only allow 1 job max if a restart is scheduled
11050                 # due to portage update.
11051                 if self._is_restart_scheduled() or \
11052                         self._opts_no_background.intersection(self.myopts):
11053                         self._set_max_jobs(1)
11054
11055                 merge_queue = self._task_queues.merge
11056
11057                 while self._schedule():
11058                         if self._poll_event_handlers:
11059                                 self._poll_loop()
11060
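                      # The package queue is exhausted (or a failure stopped scheduling);
                      # keep polling until all running jobs and queued merges have
                      # drained.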
11061                 while True:
11062                         self._schedule()
11063                         if not (self._jobs or merge_queue):
11064                                 break
11065                         if self._poll_event_handlers:
11066                                 self._poll_loop()
11067
11068         def _keep_scheduling(self):
11069                 return bool(self._pkg_queue and \
11070                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11071
11072         def _schedule_tasks(self):
11073
11074                 # When the number of jobs drops to zero, process all waiting merges.
11075                 if not self._jobs and self._merge_wait_queue:
11076                         for task in self._merge_wait_queue:
11077                                 task.addExitListener(self._merge_wait_exit_handler)
11078                                 self._task_queues.merge.add(task)
11079                         self._status_display.merges = len(self._task_queues.merge)
11080                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11081                         del self._merge_wait_queue[:]
11082
11083                 self._schedule_tasks_imp()
11084                 self._status_display.display()
11085
11086                 state_change = 0
11087                 for q in self._task_queues.values():
11088                         if q.schedule():
11089                                 state_change += 1
11090
11091                 # Cancel prefetchers if they're the only reason
11092                 # the main poll loop is still running.
11093                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11094                         not (self._jobs or self._task_queues.merge) and \
11095                         self._task_queues.fetch:
11096                         self._task_queues.fetch.clear()
11097                         state_change += 1
11098
11099                 if state_change:
11100                         self._schedule_tasks_imp()
11101                         self._status_display.display()
11102
11103                 return self._keep_scheduling()
11104
11105         def _job_delay(self):
11106                 """
11107                 @rtype: bool
11108                 @returns: True if job scheduling should be delayed, False otherwise.
11109                 """
11110
11111                 if self._jobs and self._max_load is not None:
11112
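                              # Back off between job starts: the delay grows with the
                              # number of running jobs (capped at _job_delay_max) so the
                              # system load has a chance to settle while a load limit is
                              # in effect.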
11113                         current_time = time.time()
11114
11115                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11116                         if delay > self._job_delay_max:
11117                                 delay = self._job_delay_max
11118                         if (current_time - self._previous_job_start_time) < delay:
11119                                 return True
11120
11121                 return False
11122
11123         def _schedule_tasks_imp(self):
11124                 """
11125                 @rtype: bool
11126                 @returns: True if state changed, False otherwise.
11127                 """
11128
11129                 state_change = 0
11130
11131                 while True:
11132
11133                         if not self._keep_scheduling():
11134                                 return bool(state_change)
11135
11136                         if self._choose_pkg_return_early or \
11137                                 self._merge_wait_scheduled or \
11138                                 (self._jobs and self._unsatisfied_system_deps) or \
11139                                 not self._can_add_job() or \
11140                                 self._job_delay():
11141                                 return bool(state_change)
11142
11143                         pkg = self._choose_pkg()
11144                         if pkg is None:
11145                                 return bool(state_change)
11146
11147                         state_change += 1
11148
11149                         if not pkg.installed:
11150                                 self._pkg_count.curval += 1
11151
11152                         task = self._task(pkg)
11153
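                              # Installed packages go straight onto the merge queue;
                              # built (binary) and source packages occupy a job slot and
                              # are merged when their extraction or build completes.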
11154                         if pkg.installed:
11155                                 merge = PackageMerge(merge=task)
11156                                 merge.addExitListener(self._merge_exit)
11157                                 self._task_queues.merge.add(merge)
11158
11159                         elif pkg.built:
11160                                 self._jobs += 1
11161                                 self._previous_job_start_time = time.time()
11162                                 self._status_display.running = self._jobs
11163                                 task.addExitListener(self._extract_exit)
11164                                 self._task_queues.jobs.add(task)
11165
11166                         else:
11167                                 self._jobs += 1
11168                                 self._previous_job_start_time = time.time()
11169                                 self._status_display.running = self._jobs
11170                                 task.addExitListener(self._build_exit)
11171                                 self._task_queues.jobs.add(task)
11172
11173                 return bool(state_change)
11174
11175         def _task(self, pkg):
11176
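                      # Look up the currently installed package in the same slot, if
                      # any; it is passed as pkg_to_replace so that its uninstall task
                      # can be marked complete once the merge succeeds.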
11177                 pkg_to_replace = None
11178                 if pkg.operation != "uninstall":
11179                         vardb = pkg.root_config.trees["vartree"].dbapi
11180                         previous_cpv = vardb.match(pkg.slot_atom)
11181                         if previous_cpv:
11182                                 previous_cpv = previous_cpv.pop()
11183                                 pkg_to_replace = self._pkg(previous_cpv,
11184                                         "installed", pkg.root_config, installed=True)
11185
11186                 task = MergeListItem(args_set=self._args_set,
11187                         background=self._background, binpkg_opts=self._binpkg_opts,
11188                         build_opts=self._build_opts,
11189                         config_pool=self._ConfigPool(pkg.root,
11190                         self._allocate_config, self._deallocate_config),
11191                         emerge_opts=self.myopts,
11192                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11193                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11194                         pkg_to_replace=pkg_to_replace,
11195                         prefetcher=self._prefetchers.get(pkg),
11196                         scheduler=self._sched_iface,
11197                         settings=self._allocate_config(pkg.root),
11198                         statusMessage=self._status_msg,
11199                         world_atom=self._world_atom)
11200
11201                 return task
11202
11203         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11204                 pkg = failed_pkg.pkg
11205                 msg = "%s to %s %s" % \
11206                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11207                 if pkg.root != "/":
11208                         msg += " %s %s" % (preposition, pkg.root)
11209
11210                 log_path = self._locate_failure_log(failed_pkg)
11211                 if log_path is not None:
11212                         msg += ", Log file:"
11213                 self._status_msg(msg)
11214
11215                 if log_path is not None:
11216                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11217
11218         def _status_msg(self, msg):
11219                 """
11220                 Display a brief status message (no newlines) in the status display.
11221                 This is called by tasks to provide feedback to the user. This
11222                 delegates to the status display the responsibility of generating
11223                 \r and \n control characters, guaranteeing that lines are created
11224                 or erased when necessary and appropriate.
11225
11226                 @type msg: str
11227                 @param msg: a brief status message (no newlines allowed)
11228                 """
11229                 if not self._background:
11230                         writemsg_level("\n")
11231                 self._status_display.displayMessage(msg)
11232
11233         def _save_resume_list(self):
11234                 """
11235                 Do this before verifying the ebuild Manifests since it might
11236                 be possible for the user to use --resume --skipfirst to get past
11237                 a non-essential package with a broken digest.
11238                 """
11239                 mtimedb = self._mtimedb
11240                 mtimedb["resume"]["mergelist"] = [list(x) \
11241                         for x in self._mergelist \
11242                         if isinstance(x, Package) and x.operation == "merge"]
11243
11244                 mtimedb.commit()
11245
11246         def _calc_resume_list(self):
11247                 """
11248                 Use the current resume list to calculate a new one,
11249                 dropping any packages with unsatisfied deps.
11250                 @rtype: bool
11251                 @returns: True if successful, False otherwise.
11252                 """
11253                 print colorize("GOOD", "*** Resuming merge...")
11254
11255                 if self._show_list():
11256                         if "--tree" in self.myopts:
11257                                 portage.writemsg_stdout("\n" + \
11258                                         darkgreen("These are the packages that " + \
11259                                         "would be merged, in reverse order:\n\n"))
11260
11261                         else:
11262                                 portage.writemsg_stdout("\n" + \
11263                                         darkgreen("These are the packages that " + \
11264                                         "would be merged, in order:\n\n"))
11265
11266                 show_spinner = "--quiet" not in self.myopts and \
11267                         "--nodeps" not in self.myopts
11268
11269                 if show_spinner:
11270                         print "Calculating dependencies  ",
11271
11272                 myparams = create_depgraph_params(self.myopts, None)
11273                 success = False
11274                 e = None
11275                 try:
11276                         success, mydepgraph, dropped_tasks = resume_depgraph(
11277                                 self.settings, self.trees, self._mtimedb, self.myopts,
11278                                 myparams, self._spinner)
11279                 except depgraph.UnsatisfiedResumeDep, exc:
11280                         # rename variable to avoid python-3.0 error:
11281                         # SyntaxError: can not delete variable 'e' referenced in nested
11282                         #              scope
11283                         e = exc
11284                         mydepgraph = e.depgraph
11285                         dropped_tasks = set()
11286
11287                 if show_spinner:
11288                         print "\b\b... done!"
11289
11290                 if e is not None:
11291                         def unsatisfied_resume_dep_msg():
11292                                 mydepgraph.display_problems()
11293                                 out = portage.output.EOutput()
11294                                 out.eerror("One or more packages are either masked or " + \
11295                                         "have missing dependencies:")
11296                                 out.eerror("")
11297                                 indent = "  "
11298                                 show_parents = set()
11299                                 for dep in e.value:
11300                                         if dep.parent in show_parents:
11301                                                 continue
11302                                         show_parents.add(dep.parent)
11303                                         if dep.atom is None:
11304                                                 out.eerror(indent + "Masked package:")
11305                                                 out.eerror(2 * indent + str(dep.parent))
11306                                                 out.eerror("")
11307                                         else:
11308                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11309                                                 out.eerror(2 * indent + str(dep.parent))
11310                                                 out.eerror("")
11311                                 msg = "The resume list contains packages " + \
11312                                         "that are either masked or have " + \
11313                                         "unsatisfied dependencies. " + \
11314                                         "Please restart/continue " + \
11315                                         "the operation manually, or use --skipfirst " + \
11316                                         "to skip the first package in the list and " + \
11317                                         "any other packages that may be " + \
11318                                         "masked or have missing dependencies."
11319                                 for line in textwrap.wrap(msg, 72):
11320                                         out.eerror(line)
11321                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11322                         return False
11323
11324                 if success and self._show_list():
11325                         mylist = mydepgraph.altlist()
11326                         if mylist:
11327                                 if "--tree" in self.myopts:
11328                                         mylist.reverse()
11329                                 mydepgraph.display(mylist, favorites=self._favorites)
11330
11331                 if not success:
11332                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11333                         return False
11334                 mydepgraph.display_problems()
11335
11336                 mylist = mydepgraph.altlist()
11337                 mydepgraph.break_refs(mylist)
11338                 mydepgraph.break_refs(dropped_tasks)
11339                 self._mergelist = mylist
11340                 self._set_digraph(mydepgraph.schedulerGraph())
11341
11342                 msg_width = 75
11343                 for task in dropped_tasks:
11344                         if not (isinstance(task, Package) and task.operation == "merge"):
11345                                 continue
11346                         pkg = task
11347                         msg = "emerge --keep-going:" + \
11348                                 " %s" % (pkg.cpv,)
11349                         if pkg.root != "/":
11350                                 msg += " for %s" % (pkg.root,)
11351                         msg += " dropped due to unsatisfied dependency."
11352                         for line in textwrap.wrap(msg, msg_width):
11353                                 eerror(line, phase="other", key=pkg.cpv)
11354                         settings = self.pkgsettings[pkg.root]
11355                         # Ensure that log collection from $T is disabled inside
11356                         # elog_process(), since any logs that might exist are
11357                         # not valid here.
11358                         settings.pop("T", None)
11359                         portage.elog.elog_process(pkg.cpv, settings)
11360                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11361
11362                 return True
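        # Illustrative only: for a package dropped by --keep-going, the eerror
        # text built above renders roughly as follows (package name invented),
        # wrapped to msg_width columns:
        #
        #   emerge --keep-going: app-editors/vim-7.2 dropped due to unsatisfied
        #   dependency.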
11363
11364         def _show_list(self):
11365                 myopts = self.myopts
11366                 if "--quiet" not in myopts and \
11367                         ("--ask" in myopts or "--tree" in myopts or \
11368                         "--verbose" in myopts):
11369                         return True
11370                 return False
11371
11372         def _world_atom(self, pkg):
11373                 """
11374                 Add the package to the world file, but only if
11375                 it's supposed to be added. Otherwise, do nothing.
11376                 """
11377
11378                 if set(("--buildpkgonly", "--fetchonly",
11379                         "--fetch-all-uri",
11380                         "--oneshot", "--onlydeps",
11381                         "--pretend")).intersection(self.myopts):
11382                         return
11383
11384                 if pkg.root != self.target_root:
11385                         return
11386
11387                 args_set = self._args_set
11388                 if not args_set.findAtomForPackage(pkg):
11389                         return
11390
11391                 logger = self._logger
11392                 pkg_count = self._pkg_count
11393                 root_config = pkg.root_config
11394                 world_set = root_config.sets["world"]
11395                 world_locked = False
11396                 if hasattr(world_set, "lock"):
11397                         world_set.lock()
11398                         world_locked = True
11399
11400                 try:
11401                         if hasattr(world_set, "load"):
11402                                 world_set.load() # maybe it's changed on disk
11403
11404                         atom = create_world_atom(pkg, args_set, root_config)
11405                         if atom:
11406                                 if hasattr(world_set, "add"):
11407                                         self._status_msg(('Recording %s in "world" ' + \
11408                                                 'favorites file...') % atom)
11409                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11410                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11411                                         world_set.add(atom)
11412                                 else:
11413                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11414                                                 (atom,), level=logging.WARN, noiselevel=-1)
11415                 finally:
11416                         if world_locked:
11417                                 world_set.unlock()
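        # Illustrative sketch only, not executed: code outside this method that
        # updates the world set directly would follow the same guarded
        # lock/load/add/unlock pattern used above; the atom below is invented.
        #
        #   world_set = root_config.sets["world"]
        #   if hasattr(world_set, "lock"):
        #           world_set.lock()
        #   try:
        #           if hasattr(world_set, "load"):
        #                   world_set.load()
        #           if hasattr(world_set, "add"):
        #                   world_set.add(">=app-editors/vim-7.2")
        #   finally:
        #           if hasattr(world_set, "unlock"):
        #                   world_set.unlock()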
11418
11419         def _pkg(self, cpv, type_name, root_config, installed=False):
11420                 """
11421                 Get a package instance from the cache, or create a new
11422                 one if necessary. Raises KeyError from aux_get if it
11423                 fails for some reason (package does not exist or is
11424                 corrupt).
11425                 """
11426                 operation = "merge"
11427                 if installed:
11428                         operation = "nomerge"
11429
11430                 if self._digraph is not None:
11431                         # Reuse existing instance when available.
11432                         pkg = self._digraph.get(
11433                                 (type_name, root_config.root, cpv, operation))
11434                         if pkg is not None:
11435                                 return pkg
11436
11437                 tree_type = depgraph.pkg_tree_map[type_name]
11438                 db = root_config.trees[tree_type].dbapi
11439                 db_keys = list(self.trees[root_config.root][
11440                         tree_type].dbapi._aux_cache_keys)
11441                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11442                 pkg = Package(cpv=cpv, metadata=metadata,
11443                         root_config=root_config, installed=installed)
11444                 if type_name == "ebuild":
11445                         settings = self.pkgsettings[root_config.root]
11446                         settings.setcpv(pkg)
11447                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11448                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11449
11450                 return pkg
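        # Illustrative sketch of the cache lookup performed above (cpv is
        # invented): an existing instance is reused whenever the depgraph
        # already holds a node keyed by (type_name, root, cpv, operation).
        #
        #   key = ("ebuild", root_config.root, "app-editors/vim-7.2", "merge")
        #   pkg = self._digraph.get(key)
        #   # hit -> reuse; miss -> aux_get() metadata + new Package()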
11451
11452 class MetadataRegen(PollScheduler):
11453
11454         def __init__(self, portdb, max_jobs=None, max_load=None):
11455                 PollScheduler.__init__(self)
11456                 self._portdb = portdb
11457
11458                 if max_jobs is None:
11459                         max_jobs = 1
11460
11461                 self._max_jobs = max_jobs
11462                 self._max_load = max_load
11463                 self._sched_iface = self._sched_iface_class(
11464                         register=self._register,
11465                         schedule=self._schedule_wait,
11466                         unregister=self._unregister)
11467
11468                 self._valid_pkgs = set()
11469                 self._process_iter = self._iter_metadata_processes()
11470                 self.returncode = os.EX_OK
11471                 self._error_count = 0
11472
11473         def _iter_metadata_processes(self):
11474                 portdb = self._portdb
11475                 valid_pkgs = self._valid_pkgs
11476                 every_cp = portdb.cp_all()
11477                 every_cp.sort(reverse=True)
11478
11479                 while every_cp:
11480                         cp = every_cp.pop()
11481                         portage.writemsg_stdout("Processing %s\n" % cp)
11482                         cpv_list = portdb.cp_list(cp)
11483                         for cpv in cpv_list:
11484                                 valid_pkgs.add(cpv)
11485                                 ebuild_path, repo_path = portdb.findname2(cpv)
11486                                 metadata_process = portdb._metadata_process(
11487                                         cpv, ebuild_path, repo_path)
11488                                 if metadata_process is None:
11489                                         continue
11490                                 yield metadata_process
11491
11492         def run(self):
11493
11494                 portdb = self._portdb
11495                 from portage.cache.cache_errors import CacheError
11496                 dead_nodes = {}
11497
11498                 for mytree in portdb.porttrees:
11499                         try:
11500                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11501                         except CacheError, e:
11502                                 portage.writemsg("Error listing cache entries for " + \
11503                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11504                                 del e
11505                                 dead_nodes = None
11506                                 break
11507
11508                 while self._schedule():
11509                         self._poll_loop()
11510
11511                 while self._jobs:
11512                         self._poll_loop()
11513
11514                 if dead_nodes:
11515                         for y in self._valid_pkgs:
11516                                 for mytree in portdb.porttrees:
11517                                         if portdb.findname2(y, mytree=mytree)[0]:
11518                                                 dead_nodes[mytree].discard(y)
11519
11520                         for mytree, nodes in dead_nodes.iteritems():
11521                                 auxdb = portdb.auxdb[mytree]
11522                                 for y in nodes:
11523                                         try:
11524                                                 del auxdb[y]
11525                                         except (KeyError, CacheError):
11526                                                 pass
11527
11528         def _schedule_tasks(self):
11529                 """
11530                 @rtype: bool
11531                 @returns: True if there may be remaining tasks to schedule,
11532                         False otherwise.
11533                 """
11534                 while self._can_add_job():
11535                         try:
11536                                 metadata_process = self._process_iter.next()
11537                         except StopIteration:
11538                                 return False
11539
11540                         self._jobs += 1
11541                         metadata_process.scheduler = self._sched_iface
11542                         metadata_process.addExitListener(self._metadata_exit)
11543                         metadata_process.start()
11544                 return True
11545
11546         def _metadata_exit(self, metadata_process):
11547                 self._jobs -= 1
11548                 if metadata_process.returncode != os.EX_OK:
11549                         self.returncode = 1
11550                         self._error_count += 1
11551                         self._valid_pkgs.discard(metadata_process.cpv)
11552                         portage.writemsg("Error processing %s, continuing...\n" % \
11553                                 (metadata_process.cpv,))
11554                 self._schedule()
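# Illustrative usage sketch for MetadataRegen; the portdb lookup below is an
# assumption about the caller's environment, not code from this module:
#
#   portdb = trees[settings["ROOT"]]["porttree"].dbapi
#   regen = MetadataRegen(portdb, max_jobs=4, max_load=2.0)
#   regen.run()
#   if regen.returncode != os.EX_OK:
#           pass # one or more ebuilds failed metadata generation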
11555
11556 class UninstallFailure(portage.exception.PortageException):
11557         """
11558         An instance of this class is raised by unmerge() when
11559         an uninstallation fails.
11560         """
11561         status = 1
11562         def __init__(self, *pargs):
11563                 portage.exception.PortageException.__init__(self, pargs)
11564                 if pargs:
11565                         self.status = pargs[0]
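        # Illustrative only: unmerge() below raises this exception instead of
        # calling sys.exit() when invoked with raise_on_error, so callers can
        # recover the failing status:
        #
        #   try:
        #           unmerge(root_config, myopts, "unmerge", unmerge_files,
        #                   ldpath_mtimes, raise_on_error=1)
        #   except UninstallFailure, e:
        #           retval = e.status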
11566
11567 def unmerge(root_config, myopts, unmerge_action,
11568         unmerge_files, ldpath_mtimes, autoclean=0,
11569         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11570         scheduler=None, writemsg_level=portage.util.writemsg_level):
11571
11572         quiet = "--quiet" in myopts
11573         settings = root_config.settings
11574         sets = root_config.sets
11575         vartree = root_config.trees["vartree"]
11576         candidate_catpkgs=[]
11577         global_unmerge=0
11578         xterm_titles = "notitles" not in settings.features
11579         out = portage.output.EOutput()
11580         pkg_cache = {}
11581         db_keys = list(vartree.dbapi._aux_cache_keys)
11582
11583         def _pkg(cpv):
11584                 pkg = pkg_cache.get(cpv)
11585                 if pkg is None:
11586                         pkg = Package(cpv=cpv, installed=True,
11587                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11588                                 root_config=root_config,
11589                                 type_name="installed")
11590                         pkg_cache[cpv] = pkg
11591                 return pkg
11592
11593         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11594         try:
11595                 # At least the parent needs to exist for the lock file.
11596                 portage.util.ensure_dirs(vdb_path)
11597         except portage.exception.PortageException:
11598                 pass
11599         vdb_lock = None
11600         try:
11601                 if os.access(vdb_path, os.W_OK):
11602                         vdb_lock = portage.locks.lockdir(vdb_path)
11603                 realsyslist = sets["system"].getAtoms()
11604                 syslist = []
11605                 for x in realsyslist:
11606                         mycp = portage.dep_getkey(x)
11607                         if mycp in settings.getvirtuals():
11608                                 providers = []
11609                                 for provider in settings.getvirtuals()[mycp]:
11610                                         if vartree.dbapi.match(provider):
11611                                                 providers.append(provider)
11612                                 if len(providers) == 1:
11613                                         syslist.extend(providers)
11614                         else:
11615                                 syslist.append(mycp)
11616         
11617                 mysettings = portage.config(clone=settings)
11618         
11619                 if not unmerge_files:
11620                         if unmerge_action == "unmerge":
11621                                 print
11622                                 print bold("emerge unmerge") + " can only be used with specific package names"
11623                                 print
11624                                 return 0
11625                         else:
11626                                 global_unmerge = 1
11627         
11628                 localtree = vartree
11629                 # process all arguments and add all
11630                 # valid db entries to candidate_catpkgs
11631                 if global_unmerge:
11632                         if not unmerge_files:
11633                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11634                 else:
11635                         #we've got command-line arguments
11636                         if not unmerge_files:
11637                                 print "\nNo packages to unmerge have been provided.\n"
11638                                 return 0
11639                         for x in unmerge_files:
11640                                 arg_parts = x.split('/')
11641                                 if x[0] not in [".","/"] and \
11642                                         arg_parts[-1][-7:] != ".ebuild":
11643                                         #possible cat/pkg or dep; treat as such
11644                                         candidate_catpkgs.append(x)
11645                                 elif unmerge_action in ["prune","clean"]:
11646                                         print "\n!!! Prune and clean do not accept individual" + \
11647                                                 " ebuilds as arguments;\n    skipping.\n"
11648                                         continue
11649                                 else:
11650                                         # it appears that the user is specifying an installed
11651                                         # ebuild and we're in "unmerge" mode, so it's ok.
11652                                         if not os.path.exists(x):
11653                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11654                                                 return 0
11655         
11656                                         absx   = os.path.abspath(x)
11657                                         sp_absx = absx.split("/")
11658                                         if sp_absx[-1][-7:] == ".ebuild":
11659                                                 del sp_absx[-1]
11660                                                 absx = "/".join(sp_absx)
11661         
11662                                         sp_absx_len = len(sp_absx)
11663         
11664                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11665                                         vdb_len  = len(vdb_path)
11666         
11667                                         sp_vdb     = vdb_path.split("/")
11668                                         sp_vdb_len = len(sp_vdb)
11669         
11670                                         if not os.path.exists(absx+"/CONTENTS"):
11671                                                 print "!!! Not a valid db dir: "+str(absx)
11672                                                 return 0
11673         
11674                                         if sp_absx_len <= sp_vdb_len:
11675                                                 # The path is shorter, so it can't be inside the vdb.
11676                                                 print sp_absx
11677                                                 print absx
11678                                                 print "\n!!!",x,"cannot be inside "+ \
11679                                                         vdb_path+"; aborting.\n"
11680                                                 return 0
11681         
11682                                         for idx in range(0,sp_vdb_len):
11683                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11684                                                         print sp_absx
11685                                                         print absx
11686                                                         print "\n!!!", x, "is not inside "+\
11687                                                                 vdb_path+"; aborting.\n"
11688                                                         return 0
11689         
11690                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11691                                         candidate_catpkgs.append(
11692                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11693         
11694                 newline=""
11695                 if (not "--quiet" in myopts):
11696                         newline="\n"
11697                 if settings["ROOT"] != "/":
11698                         writemsg_level(darkgreen(newline+ \
11699                                 ">>> Using system located in ROOT tree %s\n" % \
11700                                 settings["ROOT"]))
11701
11702                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11703                         not ("--quiet" in myopts):
11704                         writemsg_level(darkgreen(newline+\
11705                                 ">>> These are the packages that would be unmerged:\n"))
11706
11707                 # Preservation of order is required for --depclean and --prune so
11708                 # that dependencies are respected. Use all_selected to eliminate
11709                 # duplicate packages since the same package may be selected by
11710                 # multiple atoms.
11711                 pkgmap = []
11712                 all_selected = set()
11713                 for x in candidate_catpkgs:
11714                         # cycle through all our candidate deps and determine
11715                         # what will and will not get unmerged
11716                         try:
11717                                 mymatch = vartree.dbapi.match(x)
11718                         except portage.exception.AmbiguousPackageName, errpkgs:
11719                                 print "\n\n!!! The short ebuild name \"" + \
11720                                         x + "\" is ambiguous.  Please specify"
11721                                 print "!!! one of the following fully-qualified " + \
11722                                         "ebuild names instead:\n"
11723                                 for i in errpkgs[0]:
11724                                         print "    " + green(i)
11725                                 print
11726                                 sys.exit(1)
11727         
11728                         if not mymatch and x[0] not in "<>=~":
11729                                 mymatch = localtree.dep_match(x)
11730                         if not mymatch:
11731                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11732                                         (x, unmerge_action), noiselevel=-1)
11733                                 continue
11734
11735                         pkgmap.append(
11736                                 {"protected": set(), "selected": set(), "omitted": set()})
11737                         mykey = len(pkgmap) - 1
11738                         if unmerge_action=="unmerge":
11739                                         for y in mymatch:
11740                                                 if y not in all_selected:
11741                                                         pkgmap[mykey]["selected"].add(y)
11742                                                         all_selected.add(y)
11743                         elif unmerge_action == "prune":
11744                                 if len(mymatch) == 1:
11745                                         continue
11746                                 best_version = mymatch[0]
11747                                 best_slot = vartree.getslot(best_version)
11748                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11749                                 for mypkg in mymatch[1:]:
11750                                         myslot = vartree.getslot(mypkg)
11751                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11752                                         if (myslot == best_slot and mycounter > best_counter) or \
11753                                                 mypkg == portage.best([mypkg, best_version]):
11754                                                 if myslot == best_slot:
11755                                                         if mycounter < best_counter:
11756                                                                 # On slot collision, keep the one with the
11757                                                                 # highest counter since it is the most
11758                                                                 # recently installed.
11759                                                                 continue
11760                                                 best_version = mypkg
11761                                                 best_slot = myslot
11762                                                 best_counter = mycounter
11763                                 pkgmap[mykey]["protected"].add(best_version)
11764                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11765                                         if mypkg != best_version and mypkg not in all_selected)
11766                                 all_selected.update(pkgmap[mykey]["selected"])
11767                         else:
11768                                 # unmerge_action == "clean"
11769                                 slotmap={}
11770                                 for mypkg in mymatch:
11771                                         if unmerge_action == "clean":
11772                                                 myslot = localtree.getslot(mypkg)
11773                                         else:
11774                                                 # since we're pruning, we don't care about slots
11775                                                 # and put all the pkgs in together
11776                                                 myslot = 0
11777                                         if myslot not in slotmap:
11778                                                 slotmap[myslot] = {}
11779                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11780
11781                                 for mypkg in vartree.dbapi.cp_list(
11782                                         portage.dep_getkey(mymatch[0])):
11783                                         myslot = vartree.getslot(mypkg)
11784                                         if myslot not in slotmap:
11785                                                 slotmap[myslot] = {}
11786                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11787
11788                                 for myslot in slotmap:
11789                                         counterkeys = slotmap[myslot].keys()
11790                                         if not counterkeys:
11791                                                 continue
11792                                         counterkeys.sort()
11793                                         pkgmap[mykey]["protected"].add(
11794                                                 slotmap[myslot][counterkeys[-1]])
11795                                         del counterkeys[-1]
11796
11797                                         for counter in counterkeys[:]:
11798                                                 mypkg = slotmap[myslot][counter]
11799                                                 if mypkg not in mymatch:
11800                                                         counterkeys.remove(counter)
11801                                                         pkgmap[mykey]["protected"].add(
11802                                                                 slotmap[myslot][counter])
11803
11804                                         #be pretty and get them in order of merge:
11805                                         for ckey in counterkeys:
11806                                                 mypkg = slotmap[myslot][ckey]
11807                                                 if mypkg not in all_selected:
11808                                                         pkgmap[mykey]["selected"].add(mypkg)
11809                                                         all_selected.add(mypkg)
11810                                         # ok, now the last-merged package
11811                                         # is protected, and the rest are selected
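                # Illustrative sketch of one pkgmap entry built above (cpvs are
                # invented): "selected" is what will be unmerged, "protected"
                # is kept, and "omitted" is filled in later for display only.
                #
                #   {"selected":  set(["app-editors/nano-2.0.9"]),
                #    "protected": set(["app-editors/nano-2.1.10"]),
                #    "omitted":   set()}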
11812                 numselected = len(all_selected)
11813                 if global_unmerge and not numselected:
11814                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11815                         return 0
11816         
11817                 if not numselected:
11818                         portage.writemsg_stdout(
11819                                 "\n>>> No packages selected for removal by " + \
11820                                 unmerge_action + "\n")
11821                         return 0
11822         finally:
11823                 if vdb_lock:
11824                         vartree.dbapi.flush_cache()
11825                         portage.locks.unlockdir(vdb_lock)
11826         
11827         from portage.sets.base import EditablePackageSet
11828         
11829         # generate a list of package sets that are directly or indirectly listed in "world",
11830         # as there is no persistent list of "installed" sets
11831         installed_sets = ["world"]
11832         stop = False
11833         pos = 0
11834         while not stop:
11835                 stop = True
11836                 pos = len(installed_sets)
11837                 for s in installed_sets[pos - 1:]:
11838                         if s not in sets:
11839                                 continue
11840                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11841                         if candidates:
11842                                 stop = False
11843                                 installed_sets += candidates
11844         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11845         del stop, pos
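        # Illustrative example with invented set names: if "world" lists @kde
        # and @kde in turn lists @kde-base, the loop above expands
        # installed_sets to ["world", "kde", "kde-base"] before sets that the
        # user explicitly requested to unmerge (root_config.setconfig.active)
        # are filtered back out.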
11846
11847         # we don't want to unmerge packages that are still listed in user-editable package sets
11848         # listed in "world" as they would be remerged on the next update of "world" or the 
11849         # relevant package sets.
11850         unknown_sets = set()
11851         for cp in xrange(len(pkgmap)):
11852                 for cpv in pkgmap[cp]["selected"].copy():
11853                         try:
11854                                 pkg = _pkg(cpv)
11855                         except KeyError:
11856                                 # It could have been uninstalled
11857                                 # by a concurrent process.
11858                                 continue
11859
11860                         if unmerge_action != "clean" and \
11861                                 root_config.root == "/" and \
11862                                 portage.match_from_list(
11863                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11864                                 msg = ("Not unmerging package %s since there is no valid " + \
11865                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11866                                 for line in textwrap.wrap(msg, 75):
11867                                         out.eerror(line)
11868                                 # adjust pkgmap so the display output is correct
11869                                 pkgmap[cp]["selected"].remove(cpv)
11870                                 all_selected.remove(cpv)
11871                                 pkgmap[cp]["protected"].add(cpv)
11872                                 continue
11873
11874                         parents = []
11875                         for s in installed_sets:
11876                                 # skip sets that the user requested to unmerge, and skip world 
11877                                 # unless we're unmerging a package set (as the package would be 
11878                                 # removed from "world" later on)
11879                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11880                                         continue
11881
11882                                 if s not in sets:
11883                                         if s in unknown_sets:
11884                                                 continue
11885                                         unknown_sets.add(s)
11886                                         out = portage.output.EOutput()
11887                                         out.eerror(("Unknown set '@%s' in " + \
11888                                                 "%svar/lib/portage/world_sets") % \
11889                                                 (s, root_config.root))
11890                                         continue
11891
11892                                 # only check instances of EditablePackageSet as other classes are generally used for
11893                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11894                                 # user can't do much about them anyway)
11895                                 if isinstance(sets[s], EditablePackageSet):
11896
11897                                         # This is derived from a snippet of code in the
11898                                         # depgraph._iter_atoms_for_pkg() method.
11899                                         for atom in sets[s].iterAtomsForPackage(pkg):
11900                                                 inst_matches = vartree.dbapi.match(atom)
11901                                                 inst_matches.reverse() # descending order
11902                                                 higher_slot = None
11903                                                 for inst_cpv in inst_matches:
11904                                                         try:
11905                                                                 inst_pkg = _pkg(inst_cpv)
11906                                                         except KeyError:
11907                                                                 # It could have been uninstalled
11908                                                                 # by a concurrent process.
11909                                                                 continue
11910
11911                                                         if inst_pkg.cp != atom.cp:
11912                                                                 continue
11913                                                         if pkg >= inst_pkg:
11914                                                                 # This is descending order, and we're not
11915                                                                 # interested in any versions <= the given pkg.
11916                                                                 break
11917                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11918                                                                 higher_slot = inst_pkg
11919                                                                 break
11920                                                 if higher_slot is None:
11921                                                         parents.append(s)
11922                                                         break
11923                         if parents:
11924                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11925                                 #print colorize("WARN", "but still listed in the following package sets:")
11926                                 #print "    %s\n" % ", ".join(parents)
11927                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11928                                 print colorize("WARN", "still referenced by the following package sets:")
11929                                 print "    %s\n" % ", ".join(parents)
11930                                 # adjust pkgmap so the display output is correct
11931                                 pkgmap[cp]["selected"].remove(cpv)
11932                                 all_selected.remove(cpv)
11933                                 pkgmap[cp]["protected"].add(cpv)
11934         
11935         del installed_sets
11936
11937         numselected = len(all_selected)
11938         if not numselected:
11939                 writemsg_level(
11940                         "\n>>> No packages selected for removal by " + \
11941                         unmerge_action + "\n")
11942                 return 0
11943
11944         # Unmerge order only matters in some cases
11945         if not ordered:
11946                 unordered = {}
11947                 for d in pkgmap:
11948                         selected = d["selected"]
11949                         if not selected:
11950                                 continue
11951                         cp = portage.cpv_getkey(iter(selected).next())
11952                         cp_dict = unordered.get(cp)
11953                         if cp_dict is None:
11954                                 cp_dict = {}
11955                                 unordered[cp] = cp_dict
11956                                 for k in d:
11957                                         cp_dict[k] = set()
11958                         for k, v in d.iteritems():
11959                                 cp_dict[k].update(v)
11960                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11961
11962         for x in xrange(len(pkgmap)):
11963                 selected = pkgmap[x]["selected"]
11964                 if not selected:
11965                         continue
11966                 for mytype, mylist in pkgmap[x].iteritems():
11967                         if mytype == "selected":
11968                                 continue
11969                         mylist.difference_update(all_selected)
11970                 cp = portage.cpv_getkey(iter(selected).next())
11971                 for y in localtree.dep_match(cp):
11972                         if y not in pkgmap[x]["omitted"] and \
11973                                 y not in pkgmap[x]["selected"] and \
11974                                 y not in pkgmap[x]["protected"] and \
11975                                 y not in all_selected:
11976                                 pkgmap[x]["omitted"].add(y)
11977                 if global_unmerge and not pkgmap[x]["selected"]:
11978                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11979                         continue
11980                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11981                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11982                                 "'%s' is part of your system profile.\n" % cp),
11983                                 level=logging.WARNING, noiselevel=-1)
11984                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11985                                 "be damaging to your system.\n\n"),
11986                                 level=logging.WARNING, noiselevel=-1)
11987                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11988                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11989                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11990                 if not quiet:
11991                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11992                 else:
11993                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11994                 for mytype in ["selected","protected","omitted"]:
11995                         if not quiet:
11996                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11997                         if pkgmap[x][mytype]:
11998                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11999                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12000                                 for pn, ver, rev in sorted_pkgs:
12001                                         if rev == "r0":
12002                                                 myversion = ver
12003                                         else:
12004                                                 myversion = ver + "-" + rev
12005                                         if mytype == "selected":
12006                                                 writemsg_level(
12007                                                         colorize("UNMERGE_WARN", myversion + " "),
12008                                                         noiselevel=-1)
12009                                         else:
12010                                                 writemsg_level(
12011                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12012                         else:
12013                                 writemsg_level("none ", noiselevel=-1)
12014                         if not quiet:
12015                                 writemsg_level("\n", noiselevel=-1)
12016                 if quiet:
12017                         writemsg_level("\n", noiselevel=-1)
12018
12019         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12020                 " packages are slated for removal.\n")
12021         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12022                         " and " + colorize("GOOD", "'omitted'") + \
12023                         " packages will not be removed.\n\n")
12024
12025         if "--pretend" in myopts:
12026                 #we're done... return
12027                 return 0
12028         if "--ask" in myopts:
12029                 if userquery("Would you like to unmerge these packages?")=="No":
12030                         # enter pretend mode for correct formatting of results
12031                         myopts["--pretend"] = True
12032                         print
12033                         print "Quitting."
12034                         print
12035                         return 0
12036         #the real unmerging begins, after a short delay....
12037         if clean_delay and not autoclean:
12038                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12039
12040         for x in xrange(len(pkgmap)):
12041                 for y in pkgmap[x]["selected"]:
12042                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12043                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12044                         mysplit = y.split("/")
12045                         #unmerge...
12046                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12047                                 mysettings, unmerge_action not in ["clean","prune"],
12048                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12049                                 scheduler=scheduler)
12050
12051                         if retval != os.EX_OK:
12052                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12053                                 if raise_on_error:
12054                                         raise UninstallFailure(retval)
12055                                 sys.exit(retval)
12056                         else:
12057                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12058                                         sets["world"].cleanPackage(vartree.dbapi, y)
12059                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12060         if clean_world and hasattr(sets["world"], "remove"):
12061                 for s in root_config.setconfig.active:
12062                         sets["world"].remove(SETPREFIX+s)
12063         return 1
12064
12065 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12066
12067         if os.path.exists("/usr/bin/install-info"):
12068                 out = portage.output.EOutput()
12069                 regen_infodirs=[]
12070                 for z in infodirs:
12071                         if z=='':
12072                                 continue
12073                         inforoot=normpath(root+z)
12074                         if os.path.isdir(inforoot):
12075                                 infomtime = long(os.stat(inforoot).st_mtime)
12076                                 if inforoot not in prev_mtimes or \
12077                                         prev_mtimes[inforoot] != infomtime:
12078                                                 regen_infodirs.append(inforoot)
12079
12080                 if not regen_infodirs:
12081                         portage.writemsg_stdout("\n")
12082                         out.einfo("GNU info directory index is up-to-date.")
12083                 else:
12084                         portage.writemsg_stdout("\n")
12085                         out.einfo("Regenerating GNU info directory index...")
12086
12087                         dir_extensions = ("", ".gz", ".bz2")
12088                         icount=0
12089                         badcount=0
12090                         errmsg = ""
12091                         for inforoot in regen_infodirs:
12092                                 if inforoot=='':
12093                                         continue
12094
12095                                 if not os.path.isdir(inforoot) or \
12096                                         not os.access(inforoot, os.W_OK):
12097                                         continue
12098
12099                                 file_list = os.listdir(inforoot)
12100                                 file_list.sort()
12101                                 dir_file = os.path.join(inforoot, "dir")
12102                                 moved_old_dir = False
12103                                 processed_count = 0
12104                                 for x in file_list:
12105                                         if x.startswith(".") or \
12106                                                 os.path.isdir(os.path.join(inforoot, x)):
12107                                                 continue
12108                                         if x.startswith("dir"):
12109                                                 skip = False
12110                                                 for ext in dir_extensions:
12111                                                         if x == "dir" + ext or \
12112                                                                 x == "dir" + ext + ".old":
12113                                                                 skip = True
12114                                                                 break
12115                                                 if skip:
12116                                                         continue
12117                                         if processed_count == 0:
12118                                                 for ext in dir_extensions:
12119                                                         try:
12120                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12121                                                                 moved_old_dir = True
12122                                                         except EnvironmentError, e:
12123                                                                 if e.errno != errno.ENOENT:
12124                                                                         raise
12125                                                                 del e
12126                                         processed_count += 1
12127                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12128                                         existsstr="already exists, for file `"
12129                                         if myso!="":
12130                                                 if re.search(existsstr,myso):
12131                                                         # Already exists... Don't increment the count for this.
12132                                                         pass
12133                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12134                                                         # This info file doesn't contain a DIR-header: install-info produces this
12135                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12136                                                         # Don't increment the count for this.
12137                                                         pass
12138                                                 else:
12139                                                         badcount=badcount+1
12140                                                         errmsg += myso + "\n"
12141                                         icount=icount+1
12142
12143                                 if moved_old_dir and not os.path.exists(dir_file):
12144                                         # We didn't generate a new dir file, so put the old file
12145                                         # back where it was originally found.
12146                                         for ext in dir_extensions:
12147                                                 try:
12148                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12149                                                 except EnvironmentError, e:
12150                                                         if e.errno != errno.ENOENT:
12151                                                                 raise
12152                                                         del e
12153
12154                                 # Clean up dir.old cruft so that it doesn't prevent
12155                                 # unmerge of otherwise empty directories.
12156                                 for ext in dir_extensions:
12157                                         try:
12158                                                 os.unlink(dir_file + ext + ".old")
12159                                         except EnvironmentError, e:
12160                                                 if e.errno != errno.ENOENT:
12161                                                         raise
12162                                                 del e
12163
12164                                 #update mtime so we can potentially avoid regenerating.
12165                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12166
12167                         if badcount:
12168                                 out.eerror("Processed %d info files; %d errors." % \
12169                                         (icount, badcount))
12170                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12171                         else:
12172                                 if icount > 0:
12173                                         out.einfo("Processed %d info files." % (icount,))
12174
12175
12176 def display_news_notification(root_config, myopts):
12177         target_root = root_config.root
12178         trees = root_config.trees
12179         settings = trees["vartree"].settings
12180         portdb = trees["porttree"].dbapi
12181         vardb = trees["vartree"].dbapi
12182         NEWS_PATH = os.path.join("metadata", "news")
12183         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12184         newsReaderDisplay = False
12185         update = "--pretend" not in myopts
12186
12187         for repo in portdb.getRepositories():
12188                 unreadItems = checkUpdatedNewsItems(
12189                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12190                 if unreadItems:
12191                         if not newsReaderDisplay:
12192                                 newsReaderDisplay = True
12193                                 print
12194                         print colorize("WARN", " * IMPORTANT:"),
12195                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12196                         
12197         
12198         if newsReaderDisplay:
12199                 print colorize("WARN", " *"),
12200                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12201                 print
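        # Illustrative only: with unread items the notification printed above
        # looks roughly like this (count and repository name invented):
        #
        #    * IMPORTANT: 3 news items need reading for repository 'gentoo'.
        #    * Use eselect news to read news items.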
12202
12203 def display_preserved_libs(vardbapi):
12204         MAX_DISPLAY = 3
12205
12206         # Ensure the registry is consistent with existing files.
12207         vardbapi.plib_registry.pruneNonExisting()
12208
12209         if vardbapi.plib_registry.hasEntries():
12210                 print
12211                 print colorize("WARN", "!!!") + " existing preserved libs:"
12212                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12213                 linkmap = vardbapi.linkmap
12214                 consumer_map = {}
12215                 owners = {}
12216                 linkmap_broken = False
12217
12218                 try:
12219                         linkmap.rebuild()
12220                 except portage.exception.CommandNotFound, e:
12221                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12222                                 level=logging.ERROR, noiselevel=-1)
12223                         del e
12224                         linkmap_broken = True
12225                 else:
12226                         search_for_owners = set()
12227                         for cpv in plibdata:
12228                                 internal_plib_keys = set(linkmap._obj_key(f) \
12229                                         for f in plibdata[cpv])
12230                                 for f in plibdata[cpv]:
12231                                         if f in consumer_map:
12232                                                 continue
12233                                         consumers = []
12234                                         for c in linkmap.findConsumers(f):
12235                                                 # Filter out any consumers that are also preserved libs
12236                                                 # belonging to the same package as the provider.
12237                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12238                                                         consumers.append(c)
12239                                         consumers.sort()
12240                                         consumer_map[f] = consumers
12241                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12242
12243                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12244
12245                 for cpv in plibdata:
12246                         print colorize("WARN", ">>>") + " package: %s" % cpv
12247                         samefile_map = {}
12248                         for f in plibdata[cpv]:
12249                                 obj_key = linkmap._obj_key(f)
12250                                 alt_paths = samefile_map.get(obj_key)
12251                                 if alt_paths is None:
12252                                         alt_paths = set()
12253                                         samefile_map[obj_key] = alt_paths
12254                                 alt_paths.add(f)
12255
12256                         for alt_paths in samefile_map.itervalues():
12257                                 alt_paths = sorted(alt_paths)
12258                                 for p in alt_paths:
12259                                         print colorize("WARN", " * ") + " - %s" % (p,)
12260                                 f = alt_paths[0]
12261                                 consumers = consumer_map.get(f, [])
12262                                 for c in consumers[:MAX_DISPLAY]:
12263                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12264                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12265                                 if len(consumers) == MAX_DISPLAY + 1:
12266                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12267                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12268                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12269                                 elif len(consumers) > MAX_DISPLAY:
12270                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12271                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12272
12273
12274 def _flush_elog_mod_echo():
12275         """
12276         Dump the mod_echo output now so that our other
12277         notifications are shown last.
12278         @rtype: bool
12279         @returns: True if messages were shown, False otherwise.
12280         """
12281         messages_shown = False
12282         try:
12283                 from portage.elog import mod_echo
12284         except ImportError:
12285                 pass # happens during downgrade to a version without the module
12286         else:
12287                 messages_shown = bool(mod_echo._items)
12288                 mod_echo.finalize()
12289         return messages_shown
12290
12291 def post_emerge(root_config, myopts, mtimedb, retval):
12292         """
12293         Misc. things to run at the end of a merge session.
12294         
12295         Update Info Files
12296         Update Config Files
12297         Update News Items
12298         Commit mtimeDB
12299         Display preserved libs warnings
12300         Exit Emerge
12301
12302         @param root_config: A RootConfig instance providing access to the target ROOT's package databases
12303         @type root_config: RootConfig
12304         @param mtimedb: The mtimeDB to store data needed across merge invocations
12305         @type mtimedb: MtimeDB class instance
12306         @param retval: Emerge's return value
12307         @type retval: Int
12308         @rtype: None
12309         @returns:
12310         1.  Calls sys.exit(retval)
12311         """
12312
12313         target_root = root_config.root
12314         trees = { target_root : root_config.trees }
12315         vardbapi = trees[target_root]["vartree"].dbapi
12316         settings = vardbapi.settings
12317         info_mtimes = mtimedb["info"]
12318
12319         # Load the most current variables from ${ROOT}/etc/profile.env
12320         settings.unlock()
12321         settings.reload()
12322         settings.regenerate()
12323         settings.lock()
12324
12325         config_protect = settings.get("CONFIG_PROTECT","").split()
12326         infodirs = settings.get("INFOPATH","").split(":") + \
12327                 settings.get("INFODIR","").split(":")
12328
12329         os.chdir("/")
12330
12331         if retval == os.EX_OK:
12332                 exit_msg = " *** exiting successfully."
12333         else:
12334                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12335         emergelog("notitles" not in settings.features, exit_msg)
12336
12337         _flush_elog_mod_echo()
12338
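        # PORTAGE_COUNTER_HASH presumably captures the vdb counter state from before
        # the merge; if it still matches the current hash, the vdb was not modified
        # and only the news notification needs to be shown.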
12339         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12340         if "--pretend" in myopts or (counter_hash is not None and \
12341                 counter_hash == vardbapi._counter_hash()):
12342                 display_news_notification(root_config, myopts)
12343                 # If vdb state has not changed then there's nothing else to do.
12344                 sys.exit(retval)
12345
12346         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12347         portage.util.ensure_dirs(vdb_path)
12348         vdb_lock = None
12349         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12350                 vdb_lock = portage.locks.lockdir(vdb_path)
12351
12352         if vdb_lock:
12353                 try:
12354                         if "noinfo" not in settings.features:
12355                                 chk_updated_info_files(target_root,
12356                                         infodirs, info_mtimes, retval)
12357                         mtimedb.commit()
12358                 finally:
12359                         if vdb_lock:
12360                                 portage.locks.unlockdir(vdb_lock)
12361
12362         chk_updated_cfg_files(target_root, config_protect)
12363         
12364         display_news_notification(root_config, myopts)
12365         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12366                 display_preserved_libs(vardbapi)        
12367
12368         sys.exit(retval)
12369
12370
12371 def chk_updated_cfg_files(target_root, config_protect):
12372         if config_protect:
12373                 #number of directories with some protect files in them
12374                 procount=0
12375                 for x in config_protect:
12376                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12377                         if not os.access(x, os.W_OK):
12378                                 # Avoid Permission denied errors generated
12379                                 # later by `find`.
12380                                 continue
12381                         try:
12382                                 mymode = os.lstat(x).st_mode
12383                         except OSError:
12384                                 continue
12385                         if stat.S_ISLNK(mymode):
12386                                 # We want to treat it like a directory if it
12387                                 # is a symlink to an existing directory.
12388                                 try:
12389                                         real_mode = os.stat(x).st_mode
12390                                         if stat.S_ISDIR(real_mode):
12391                                                 mymode = real_mode
12392                                 except OSError:
12393                                         pass
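                        # Portage stages pending configuration updates as ._cfg????_<name>
                        # files alongside the protected files; use find to locate them,
                        # recursing for directories and checking the single entry otherwise.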
12394                         if stat.S_ISDIR(mymode):
12395                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12396                         else:
12397                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12398                                         os.path.split(x.rstrip(os.path.sep))
12399                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12400                         a = commands.getstatusoutput(mycommand)
12401                         if a[0] != 0:
12402                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12403                                 sys.stderr.flush()
12404                                 # Show the error message alone, sending stdout to /dev/null.
12405                                 os.system(mycommand + " 1>/dev/null")
12406                         else:
12407                                 files = a[1].split('\0')
12408                                 # split always produces an empty string as the last element
12409                                 if files and not files[-1]:
12410                                         del files[-1]
12411                                 if files:
12412                                         procount += 1
12413                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12414                                         if stat.S_ISDIR(mymode):
12415                                                  print "%d config files in '%s' need updating." % \
12416                                                         (len(files), x)
12417                                         else:
12418                                                  print "config file '%s' needs updating." % x
12419
12420                 if procount:
12421                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12422                                 " section of the " + bold("emerge")
12423                         print " "+yellow("*")+" man page to learn how to update config files."
12424
12425 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12426         update=False):
12427         """
12428         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12429         Returns the number of unread (yet relevant) items.
12430         
12431         @param portdb: a portage tree database
12432         @type portdb: portdbapi
12433         @param vardb: an installed package database
12434         @type vardb: vardbapi
12435         @param NEWS_PATH:
12436         @type NEWS_PATH:
12437         @param UNREAD_PATH:
12438         @type UNREAD_PATH:
12439         @param repo_id:
12440         @type repo_id:
12441         @rtype: Integer
12442         @returns:
12443         1.  The number of unread but relevant news items.
12444         
12445         """
12446         from portage.news import NewsManager
12447         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12448         return manager.getUnreadItems( repo_id, update=update )
12449
12450 def insert_category_into_atom(atom, category):
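        # Prepend a category to a category-less atom while keeping any leading
        # operators, e.g. (illustrative):
        #   insert_category_into_atom(">=foo-1.2", "dev-util") -> ">=dev-util/foo-1.2"
        # Returns None if the atom contains no word characters.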
12451         alphanum = re.search(r'\w', atom)
12452         if alphanum:
12453                 ret = atom[:alphanum.start()] + "%s/" % category + \
12454                         atom[alphanum.start():]
12455         else:
12456                 ret = None
12457         return ret
12458
12459 def is_valid_package_atom(x):
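        # Category-less atoms are checked by grafting in a dummy "cat/" category so
        # that portage.isvalidatom() sees the category/package form it expects,
        # e.g. "=foo-1.0" is validated as "=cat/foo-1.0".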
12460         if "/" not in x:
12461                 alphanum = re.search(r'\w', x)
12462                 if alphanum:
12463                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12464         return portage.isvalidatom(x)
12465
12466 def show_blocker_docs_link():
12467         print
12468         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12469         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12470         print
12471         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12472         print
12473
12474 def show_mask_docs():
12475         print "For more information, see the MASKED PACKAGES section in the emerge"
12476         print "man page or refer to the Gentoo Handbook."
12477
12478 def action_sync(settings, trees, mtimedb, myopts, myaction):
12479         xterm_titles = "notitles" not in settings.features
12480         emergelog(xterm_titles, " === sync")
12481         myportdir = settings.get("PORTDIR", None)
12482         out = portage.output.EOutput()
12483         if not myportdir:
12484                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12485                 sys.exit(1)
12486         if myportdir[-1]=="/":
12487                 myportdir=myportdir[:-1]
12488         try:
12489                 st = os.stat(myportdir)
12490         except OSError:
12491                 st = None
12492         if st is None:
12493                 print ">>>",myportdir,"not found, creating it."
12494                 os.makedirs(myportdir,0755)
12495                 st = os.stat(myportdir)
12496
12497         spawn_kwargs = {}
12498         spawn_kwargs["env"] = settings.environ()
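        # With FEATURES=usersync and root privileges, drop to the uid/gid that owns
        # PORTDIR when it belongs to another user or group (and the corresponding
        # permission bits are set), so the synced tree keeps its existing ownership.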
12499         if 'usersync' in settings.features and \
12500                 portage.data.secpass >= 2 and \
12501                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12502                 st.st_gid != os.getgid() and st.st_mode & 0070):
12503                 try:
12504                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12505                 except KeyError:
12506                         pass
12507                 else:
12508                         # Drop privileges when syncing, in order to match
12509                         # existing uid/gid settings.
12510                         spawn_kwargs["uid"]    = st.st_uid
12511                         spawn_kwargs["gid"]    = st.st_gid
12512                         spawn_kwargs["groups"] = [st.st_gid]
12513                         spawn_kwargs["env"]["HOME"] = homedir
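                        # Start from a group-writable umask, but strip group write
                        # permission if PORTDIR itself is not group writable.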
12514                         umask = 0002
12515                         if not st.st_mode & 0020:
12516                                 umask = umask | 0020
12517                         spawn_kwargs["umask"] = umask
12518
12519         syncuri = settings.get("SYNC", "").strip()
12520         if not syncuri:
12521                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12522                         noiselevel=-1, level=logging.ERROR)
12523                 return 1
12524
12525         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12526         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12527
12528         os.umask(0022)
12529         dosyncuri = syncuri
12530         updatecache_flg = False
12531         if myaction == "metadata":
12532                 print "skipping sync"
12533                 updatecache_flg = True
12534         elif ".git" in vcs_dirs:
12535                 # Update existing git repository, and ignore the syncuri. We are
12536                 # going to trust the user and assume that the user is in the branch
12537                 # that he/she wants updated. We'll let the user manage branches with
12538                 # git directly.
12539                 if portage.process.find_binary("git") is None:
12540                         msg = ["Command not found: git",
12541                         "Type \"emerge dev-util/git\" to enable git support."]
12542                         for l in msg:
12543                                 writemsg_level("!!! %s\n" % l,
12544                                         level=logging.ERROR, noiselevel=-1)
12545                         return 1
12546                 msg = ">>> Starting git pull in %s..." % myportdir
12547                 emergelog(xterm_titles, msg )
12548                 writemsg_level(msg + "\n")
12549                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12550                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12551                 if exitcode != os.EX_OK:
12552                         msg = "!!! git pull error in %s." % myportdir
12553                         emergelog(xterm_titles, msg)
12554                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12555                         return exitcode
12556                 msg = ">>> Git pull in %s successful" % myportdir
12557                 emergelog(xterm_titles, msg)
12558                 writemsg_level(msg + "\n")
12559                 exitcode = git_sync_timestamps(settings, myportdir)
12560                 if exitcode == os.EX_OK:
12561                         updatecache_flg = True
12562         elif syncuri[:8]=="rsync://":
12563                 for vcs_dir in vcs_dirs:
12564                         writemsg_level(("!!! %s appears to be under revision " + \
12565                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12566                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12567                         return 1
12568                 if not os.path.exists("/usr/bin/rsync"):
12569                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12570                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12571                         sys.exit(1)
12572                 mytimeout=180
12573
12574                 rsync_opts = []
12575                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12576                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12577                         rsync_opts.extend([
12578                                 "--recursive",    # Recurse directories
12579                                 "--links",        # Consider symlinks
12580                                 "--safe-links",   # Ignore links outside of tree
12581                                 "--perms",        # Preserve permissions
12582                                 "--times",        # Preserve mod times
12583                                 "--compress",     # Compress the data transmitted
12584                                 "--force",        # Force deletion on non-empty dirs
12585                                 "--whole-file",   # Don't do block transfers, only entire files
12586                                 "--delete",       # Delete files that aren't in the master tree
12587                                 "--stats",        # Show final statistics about what was transferred
12588                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12589                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12590                                 "--exclude=/local",       # Exclude local     from consideration
12591                                 "--exclude=/packages",    # Exclude packages  from consideration
12592                         ])
12593
12594                 else:
12595                         # The below validation is not needed when using the above hardcoded
12596                         # defaults.
12597
12598                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12599                         rsync_opts.extend(
12600                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12601                         for opt in ("--recursive", "--times"):
12602                                 if opt not in rsync_opts:
12603                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12604                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12605                                         rsync_opts.append(opt)
12606         
12607                         for exclude in ("distfiles", "local", "packages"):
12608                                 opt = "--exclude=/%s" % exclude
12609                                 if opt not in rsync_opts:
12610                                         portage.writemsg(yellow("WARNING:") + \
12611                                         " adding required option %s not included in "  % opt + \
12612                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12613                                         rsync_opts.append(opt)
12614         
12615                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12616                                 def rsync_opt_startswith(opt_prefix):
12617                                         for x in rsync_opts:
12618                                                 if x.startswith(opt_prefix):
12619                                                         return True
12620                                         return False
12621
12622                                 if not rsync_opt_startswith("--timeout="):
12623                                         rsync_opts.append("--timeout=%d" % mytimeout)
12624
12625                                 for opt in ("--compress", "--whole-file"):
12626                                         if opt not in rsync_opts:
12627                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12628                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12629                                                 rsync_opts.append(opt)
12630
12631                 if "--quiet" in myopts:
12632                         rsync_opts.append("--quiet")    # Shut up a lot
12633                 else:
12634                         rsync_opts.append("--verbose")  # Print filelist
12635
12636                 if "--verbose" in myopts:
12637                         rsync_opts.append("--progress")  # Progress meter for each file
12638
12639                 if "--debug" in myopts:
12640                         rsync_opts.append("--checksum") # Force checksum on all files
12641
12642                 # Real local timestamp file.
12643                 servertimestampfile = os.path.join(
12644                         myportdir, "metadata", "timestamp.chk")
12645
12646                 content = portage.util.grabfile(servertimestampfile)
12647                 mytimestamp = 0
12648                 if content:
12649                         try:
12650                                 mytimestamp = time.mktime(time.strptime(content[0],
12651                                         "%a, %d %b %Y %H:%M:%S +0000"))
12652                         except (OverflowError, ValueError):
12653                                 pass
12654                 del content
12655
12656                 try:
12657                         rsync_initial_timeout = \
12658                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12659                 except ValueError:
12660                         rsync_initial_timeout = 15
12661
12662                 try:
12663                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12664                 except SystemExit, e:
12665                         raise # Needed else can't exit
12666                 except:
12667                         maxretries=3 #default number of retries
12668
12669                 retries=0
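                # Split the sync URI into its optional "user@", host and ":port" parts
                # via the regex capture groups, e.g. (illustrative):
                #   "rsync://user@host:873/gentoo-portage" -> ("user@", "host", ":873")
                # Missing pieces come back as None and are normalized to "" below.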
12670                 user_name, hostname, port = re.split(
12671                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12672                 if port is None:
12673                         port=""
12674                 if user_name is None:
12675                         user_name=""
12676                 updatecache_flg=True
12677                 all_rsync_opts = set(rsync_opts)
12678                 extra_rsync_opts = shlex.split(
12679                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12680                 all_rsync_opts.update(extra_rsync_opts)
12681                 family = socket.AF_INET
12682                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12683                         family = socket.AF_INET
12684                 elif socket.has_ipv6 and \
12685                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12686                         family = socket.AF_INET6
12687                 ips=[]
12688                 SERVER_OUT_OF_DATE = -1
12689                 EXCEEDED_MAX_RETRIES = -2
12690                 while (1):
12691                         if ips:
12692                                 del ips[0]
12693                         if ips==[]:
12694                                 try:
12695                                         for addrinfo in socket.getaddrinfo(
12696                                                 hostname, None, family, socket.SOCK_STREAM):
12697                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12698                                                         # IPv6 addresses need to be enclosed in square brackets
12699                                                         ips.append("[%s]" % addrinfo[4][0])
12700                                                 else:
12701                                                         ips.append(addrinfo[4][0])
12702                                         from random import shuffle
12703                                         shuffle(ips)
12704                                 except SystemExit, e:
12705                                         raise # Needed else can't exit
12706                                 except Exception, e:
12707                                         print "Notice:",str(e)
12708                                         dosyncuri=syncuri
12709
12710                         if ips:
12711                                 try:
12712                                         dosyncuri = syncuri.replace(
12713                                                 "//" + user_name + hostname + port + "/",
12714                                                 "//" + user_name + ips[0] + port + "/", 1)
12715                                 except SystemExit, e:
12716                                         raise # Needed else can't exit
12717                                 except Exception, e:
12718                                         print "Notice:",str(e)
12719                                         dosyncuri=syncuri
12720
12721                         if (retries==0):
12722                                 if "--ask" in myopts:
12723                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12724                                                 print
12725                                                 print "Quitting."
12726                                                 print
12727                                                 sys.exit(0)
12728                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12729                                 if "--quiet" not in myopts:
12730                                         print ">>> Starting rsync with "+dosyncuri+"..."
12731                         else:
12732                                 emergelog(xterm_titles,
12733                                         ">>> Starting retry %d of %d with %s" % \
12734                                                 (retries,maxretries,dosyncuri))
12735                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12736
12737                         if mytimestamp != 0 and "--quiet" not in myopts:
12738                                 print ">>> Checking server timestamp ..."
12739
12740                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12741
12742                         if "--debug" in myopts:
12743                                 print rsynccommand
12744
12745                         exitcode = os.EX_OK
12746                         servertimestamp = 0
12747                         # Even if there's no timestamp available locally, fetch the
12748                         # timestamp anyway as an initial probe to verify that the server is
12749                         # responsive.  This protects us from hanging indefinitely on a
12750                         # connection attempt to an unresponsive server which rsync's
12751                         # --timeout option does not prevent.
12752                         if True:
12753                                 # Temporary file for remote server timestamp comparison.
12754                                 from tempfile import mkstemp
12755                                 fd, tmpservertimestampfile = mkstemp()
12756                                 os.close(fd)
12757                                 mycommand = rsynccommand[:]
12758                                 mycommand.append(dosyncuri.rstrip("/") + \
12759                                         "/metadata/timestamp.chk")
12760                                 mycommand.append(tmpservertimestampfile)
12761                                 content = None
12762                                 mypids = []
12763                                 try:
12764                                         def timeout_handler(signum, frame):
12765                                                 raise portage.exception.PortageException("timed out")
12766                                         signal.signal(signal.SIGALRM, timeout_handler)
12767                                         # Timeout here in case the server is unresponsive.  The
12768                                         # --timeout rsync option doesn't apply to the initial
12769                                         # connection attempt.
12770                                         if rsync_initial_timeout:
12771                                                 signal.alarm(rsync_initial_timeout)
12772                                         try:
12773                                                 mypids.extend(portage.process.spawn(
12774                                                         mycommand, env=settings.environ(), returnpid=True))
12775                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12776                                                 content = portage.grabfile(tmpservertimestampfile)
12777                                         finally:
12778                                                 if rsync_initial_timeout:
12779                                                         signal.alarm(0)
12780                                                 try:
12781                                                         os.unlink(tmpservertimestampfile)
12782                                                 except OSError:
12783                                                         pass
12784                                 except portage.exception.PortageException, e:
12785                                         # timed out
12786                                         print e
12787                                         del e
12788                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12789                                                 os.kill(mypids[0], signal.SIGTERM)
12790                                                 os.waitpid(mypids[0], 0)
12791                                         # This is the same code rsync uses for timeout.
12792                                         exitcode = 30
12793                                 else:
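                                        # os.waitpid() returns a 16-bit status word (low byte:
                                        # terminating signal, high byte: exit status); collapse
                                        # it into a single exit code for the checks below.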
12794                                         if exitcode != os.EX_OK:
12795                                                 if exitcode & 0xff:
12796                                                         exitcode = (exitcode & 0xff) << 8
12797                                                 else:
12798                                                         exitcode = exitcode >> 8
12799                                 if mypids:
12800                                         portage.process.spawned_pids.remove(mypids[0])
12801                                 if content:
12802                                         try:
12803                                                 servertimestamp = time.mktime(time.strptime(
12804                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12805                                         except (OverflowError, ValueError):
12806                                                 pass
12807                                 del mycommand, mypids, content
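                        # Decide based on the timestamps: identical means already in sync,
                        # an older server timestamp means this mirror is stale, and an
                        # unknown or newer one means the tree actually gets rsynced.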
12808                         if exitcode == os.EX_OK:
12809                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12810                                         emergelog(xterm_titles,
12811                                                 ">>> Cancelling sync -- Already current.")
12812                                         print
12813                                         print ">>>"
12814                                         print ">>> Timestamps on the server and in the local repository are the same."
12815                                         print ">>> Cancelling all further sync action. You are already up to date."
12816                                         print ">>>"
12817                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12818                                         print ">>>"
12819                                         print
12820                                         sys.exit(0)
12821                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12822                                         emergelog(xterm_titles,
12823                                                 ">>> Server out of date: %s" % dosyncuri)
12824                                         print
12825                                         print ">>>"
12826                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12827                                         print ">>>"
12828                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12829                                         print ">>>"
12830                                         print
12831                                         exitcode = SERVER_OUT_OF_DATE
12832                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12833                                         # actual sync
12834                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12835                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
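                                        # These rsync exit codes are treated as final here
                                        # (success, or errors a retry is unlikely to fix), so
                                        # leave the retry loop; other codes fall through and
                                        # are retried.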
12836                                         if exitcode in [0,1,3,4,11,14,20,21]:
12837                                                 break
12838                         elif exitcode in [1,3,4,11,14,20,21]:
12839                                 break
12840                         else:
12841                                 # Code 2 indicates protocol incompatibility, which is expected
12842                                 # for servers with protocol < 29 that don't support
12843                                 # --prune-empty-directories.  Retry for a server that supports
12844                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12845                                 pass
12846
12847                         retries=retries+1
12848
12849                         if retries<=maxretries:
12850                                 print ">>> Retrying..."
12851                                 time.sleep(11)
12852                         else:
12853                                 # over retries
12854                                 # exit loop
12855                                 updatecache_flg=False
12856                                 exitcode = EXCEEDED_MAX_RETRIES
12857                                 break
12858
12859                 if (exitcode==0):
12860                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12861                 elif exitcode == SERVER_OUT_OF_DATE:
12862                         sys.exit(1)
12863                 elif exitcode == EXCEEDED_MAX_RETRIES:
12864                         sys.stderr.write(
12865                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12866                         sys.exit(1)
12867                 elif (exitcode>0):
12868                         msg = []
12869                         if exitcode==1:
12870                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12871                                 msg.append("that your SYNC setting is correct.")
12872                                 msg.append("SYNC=" + settings["SYNC"])
12873                         elif exitcode==11:
12874                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12875                                 msg.append("this means your disk is full, but it can also be caused by corruption")
12876                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12877                                 msg.append("and try again after the problem has been fixed.")
12878                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12879                         elif exitcode==20:
12880                                 msg.append("Rsync was killed before it finished.")
12881                         else:
12882                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12883                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12884                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12885                                 msg.append("temporary problem unless complications exist with your network")
12886                                 msg.append("(and possibly your system's filesystem) configuration.")
12887                         for line in msg:
12888                                 out.eerror(line)
12889                         sys.exit(exitcode)
12890         elif syncuri[:6]=="cvs://":
12891                 if not os.path.exists("/usr/bin/cvs"):
12892                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12893                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12894                         sys.exit(1)
12895                 cvsroot=syncuri[6:]
12896                 cvsdir=os.path.dirname(myportdir)
12897                 if not os.path.exists(myportdir+"/CVS"):
12898                         #initial checkout
12899                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12900                         if os.path.exists(cvsdir+"/gentoo-x86"):
12901                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12902                                 sys.exit(1)
12903                         try:
12904                                 os.rmdir(myportdir)
12905                         except OSError, e:
12906                                 if e.errno != errno.ENOENT:
12907                                         sys.stderr.write(
12908                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12909                                         sys.exit(1)
12910                                 del e
12911                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12912                                 print "!!! cvs checkout error; exiting."
12913                                 sys.exit(1)
12914                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12915                 else:
12916                         #cvs update
12917                         print ">>> Starting cvs update with "+syncuri+"..."
12918                         retval = portage.process.spawn_bash(
12919                                 "cd %s; cvs -z0 -q update -dP" % \
12920                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12921                         if retval != os.EX_OK:
12922                                 sys.exit(retval)
12923                 dosyncuri = syncuri
12924         else:
12925                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12926                         noiselevel=-1, level=logging.ERROR)
12927                 return 1
12928
12929         if updatecache_flg and  \
12930                 myaction != "metadata" and \
12931                 "metadata-transfer" not in settings.features:
12932                 updatecache_flg = False
12933
12934         # Reload the whole config from scratch.
12935         settings, trees, mtimedb = load_emerge_config(trees=trees)
12936         root_config = trees[settings["ROOT"]]["root_config"]
12937         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12938
12939         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12940                 action_metadata(settings, portdb, myopts)
12941
12942         if portage._global_updates(trees, mtimedb["updates"]):
12943                 mtimedb.commit()
12944                 # Reload the whole config from scratch.
12945                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12946                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12947                 root_config = trees[settings["ROOT"]]["root_config"]
12948
12949         mybestpv = portdb.xmatch("bestmatch-visible",
12950                 portage.const.PORTAGE_PACKAGE_ATOM)
12951         mypvs = portage.best(
12952                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12953                 portage.const.PORTAGE_PACKAGE_ATOM))
12954
12955         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12956
12957         if myaction != "metadata":
12958                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12959                         retval = portage.process.spawn(
12960                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12961                                 dosyncuri], env=settings.environ())
12962                         if retval != os.EX_OK:
12963                                 print red(" * ")+bold("spawn of " + portage.USER_CONFIG_PATH + "/bin/post_sync failed")
12964
12965         if mybestpv != mypvs and "--quiet" not in myopts:
12966                 print
12967                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12968                 print red(" * ")+"that you update portage now, before any other packages are updated."
12969                 print
12970                 print red(" * ")+"To update portage, run 'emerge portage' now."
12971                 print
12972         
12973         display_news_notification(root_config, myopts)
12974         return os.EX_OK
12975
12976 def git_sync_timestamps(settings, portdir):
12977         """
12978         Since git doesn't preserve timestamps, synchronize timestamps between
12979         metadata cache entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12980         for a given file as long as the file in the working tree is not modified
12981         (relative to HEAD).
12982         """
12983         cache_dir = os.path.join(portdir, "metadata", "cache")
12984         if not os.path.isdir(cache_dir):
12985                 return os.EX_OK
12986         writemsg_level(">>> Synchronizing timestamps...\n")
12987
12988         from portage.cache.cache_errors import CacheError
12989         try:
12990                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12991                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12992         except CacheError, e:
12993                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12994                         level=logging.ERROR, noiselevel=-1)
12995                 return 1
12996
12997         ec_dir = os.path.join(portdir, "eclass")
12998         try:
12999                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13000                         if f.endswith(".eclass"))
13001         except OSError, e:
13002                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13003                         level=logging.ERROR, noiselevel=-1)
13004                 return 1
13005
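        # Ask git which tracked files have been modified relative to HEAD
        # (--diff-filter=M); their cached timestamps can no longer be trusted, so
        # the corresponding ebuilds/eclasses are skipped below.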
13006         args = [portage.const.BASH_BINARY, "-c",
13007                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13008                 portage._shell_quote(portdir)]
13009         import subprocess
13010         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13011         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13012         rval = proc.wait()
13013         if rval != os.EX_OK:
13014                 return rval
13015
13016         modified_eclasses = set(ec for ec in ec_names \
13017                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13018
13019         updated_ec_mtimes = {}
13020
13021         for cpv in cache_db:
13022                 cpv_split = portage.catpkgsplit(cpv)
13023                 if cpv_split is None:
13024                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13025                                 level=logging.ERROR, noiselevel=-1)
13026                         continue
13027
13028                 cat, pn, ver, rev = cpv_split
13029                 cat, pf = portage.catsplit(cpv)
13030                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13031                 if relative_eb_path in modified_files:
13032                         continue
13033
13034                 try:
13035                         cache_entry = cache_db[cpv]
13036                         eb_mtime = cache_entry.get("_mtime_")
13037                         ec_mtimes = cache_entry.get("_eclasses_")
13038                 except KeyError:
13039                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13040                                 level=logging.ERROR, noiselevel=-1)
13041                         continue
13042                 except CacheError, e:
13043                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13044                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13045                         continue
13046
13047                 if eb_mtime is None:
13048                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13049                                 level=logging.ERROR, noiselevel=-1)
13050                         continue
13051
13052                 try:
13053                         eb_mtime = long(eb_mtime)
13054                 except ValueError:
13055                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13056                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13057                         continue
13058
13059                 if ec_mtimes is None:
13060                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13061                                 level=logging.ERROR, noiselevel=-1)
13062                         continue
13063
13064                 if modified_eclasses.intersection(ec_mtimes):
13065                         continue
13066
13067                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13068                 if missing_eclasses:
13069                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13070                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13071                                 noiselevel=-1)
13072                         continue
13073
13074                 eb_path = os.path.join(portdir, relative_eb_path)
13075                 try:
13076                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13077                 except OSError:
13078                         writemsg_level("!!! Missing ebuild: %s\n" % \
13079                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13080                         continue
13081
13082                 inconsistent = False
13083                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13084                         updated_mtime = updated_ec_mtimes.get(ec)
13085                         if updated_mtime is not None and updated_mtime != ec_mtime:
13086                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13087                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13088                                 inconsistent = True
13089                                 break
13090
13091                 if inconsistent:
13092                         continue
13093
13094                 if current_eb_mtime != eb_mtime:
13095                         os.utime(eb_path, (eb_mtime, eb_mtime))
13096
13097                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13098                         if ec in updated_ec_mtimes:
13099                                 continue
13100                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13101                         current_mtime = long(os.stat(ec_path).st_mtime)
13102                         if current_mtime != ec_mtime:
13103                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13104                         updated_ec_mtimes[ec] = ec_mtime
13105
13106         return os.EX_OK
13107
13108 def action_metadata(settings, portdb, myopts):
13109         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13110         old_umask = os.umask(0002)
13111         cachedir = os.path.normpath(settings.depcachedir)
13112         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13113                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13114                                         "/sys", "/tmp", "/usr",  "/var"]:
13115                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13116                         "ROOT DIRECTORY ON YOUR SYSTEM."
13117                 print >> sys.stderr, \
13118                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13119                 sys.exit(73)
13120         if not os.path.exists(cachedir):
13121                 os.mkdir(cachedir)
13122
13123         ec = portage.eclass_cache.cache(portdb.porttree_root)
13124         myportdir = os.path.realpath(settings["PORTDIR"])
13125         cm = settings.load_best_module("portdbapi.metadbmodule")(
13126                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13127
13128         from portage.cache import util
13129
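        # Progress reporter for the cache transfer below: wraps the quiet mirroring
        # callbacks and prints an approximate percentage as the cp list is traversed.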
13130         class percentage_noise_maker(util.quiet_mirroring):
13131                 def __init__(self, dbapi):
13132                         self.dbapi = dbapi
13133                         self.cp_all = dbapi.cp_all()
13134                         l = len(self.cp_all)
13135                         self.call_update_min = 100000000
13136                         self.min_cp_all = l/100.0
13137                         self.count = 1
13138                         self.pstr = ''
13139
13140                 def __iter__(self):
13141                         for x in self.cp_all:
13142                                 self.count += 1
13143                                 if self.count > self.min_cp_all:
13144                                         self.call_update_min = 0
13145                                         self.count = 0
13146                                 for y in self.dbapi.cp_list(x):
13147                                         yield y
13148                         self.call_update_min = 0
13149
13150                 def update(self, *arg):
13151                         try:
                                self.pstr = int(self.pstr) + 1
13152                         except ValueError:
                                self.pstr = 1
13153                         sys.stdout.write("%s%i%%" % \
13154                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13155                         sys.stdout.flush()
13156                         self.call_update_min = 10000000
13157
13158                 def finish(self, *arg):
13159                         sys.stdout.write("\b\b\b\b100%\n")
13160                         sys.stdout.flush()
13161
13162         if "--quiet" in myopts:
13163                 def quicky_cpv_generator(cp_all_list):
13164                         for x in cp_all_list:
13165                                 for y in portdb.cp_list(x):
13166                                         yield y
13167                 source = quicky_cpv_generator(portdb.cp_all())
13168                 noise_maker = portage.cache.util.quiet_mirroring()
13169         else:
13170                 noise_maker = source = percentage_noise_maker(portdb)
13171         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13172                 eclass_cache=ec, verbose_instance=noise_maker)
13173
13174         sys.stdout.flush()
13175         os.umask(old_umask)
13176
13177 def action_regen(settings, portdb, max_jobs, max_load):
13178         xterm_titles = "notitles" not in settings.features
13179         emergelog(xterm_titles, " === regen")
13180         #regenerate cache entries
13181         portage.writemsg_stdout("Regenerating cache entries...\n")
13182         try:
13183                 os.close(sys.stdin.fileno())
13184         except SystemExit, e:
13185                 raise # Needed else can't exit
13186         except:
13187                 pass
13188         sys.stdout.flush()
13189
13190         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13191         regen.run()
13192
13193         portage.writemsg_stdout("done!\n")
13194         return regen.returncode
13195
13196 def action_config(settings, trees, myopts, myfiles):
13197         if len(myfiles) != 1:
13198                 print red("!!! config can only take a single package atom at this time\n")
13199                 sys.exit(1)
13200         if not is_valid_package_atom(myfiles[0]):
13201                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13202                         noiselevel=-1)
13203                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13204                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13205                 sys.exit(1)
13206         print
13207         try:
13208                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13209         except portage.exception.AmbiguousPackageName, e:
13210                 # Multiple matches thrown from cpv_expand
13211                 pkgs = e.args[0]
13212         if len(pkgs) == 0:
13213                 print "No packages found.\n"
13214                 sys.exit(0)
13215         elif len(pkgs) > 1:
13216                 if "--ask" in myopts:
13217                         options = []
13218                         print "Please select a package to configure:"
13219                         idx = 0
13220                         for pkg in pkgs:
13221                                 idx += 1
13222                                 options.append(str(idx))
13223                                 print options[-1]+") "+pkg
13224                         print "X) Cancel"
13225                         options.append("X")
13226                         idx = userquery("Selection?", options)
13227                         if idx == "X":
13228                                 sys.exit(0)
13229                         pkg = pkgs[int(idx)-1]
13230                 else:
13231                         print "The following packages are available:"
13232                         for pkg in pkgs:
13233                                 print "* "+pkg
13234                         print "\nPlease use a specific atom or the --ask option."
13235                         sys.exit(1)
13236         else:
13237                 pkg = pkgs[0]
13238
13239         print
13240         if "--ask" in myopts:
13241                 if userquery("Ready to configure "+pkg+"?") == "No":
13242                         sys.exit(0)
13243         else:
13244                 print "Configuring %s..." % pkg
13245         print
13246         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13247         mysettings = portage.config(clone=settings)
13248         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13249         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13250         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13251                 mysettings,
13252                 debug=debug, cleanup=True,
13253                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13254         if retval == os.EX_OK:
13255                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13256                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13257         print
13258
13259 def action_info(settings, trees, myopts, myfiles):
13260         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13261                 settings.profile_path, settings["CHOST"],
13262                 trees[settings["ROOT"]]["vartree"].dbapi)
13263         header_width = 65
13264         header_title = "System Settings"
13265         if myfiles:
13266                 print header_width * "="
13267                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13268         print header_width * "="
13269         print "System uname: "+platform.platform(aliased=1)
13270
13271         lastSync = portage.grabfile(os.path.join(
13272                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13273         print "Timestamp of tree:",
13274         if lastSync:
13275                 print lastSync[0]
13276         else:
13277                 print "Unknown"
13278
13279         output=commands.getstatusoutput("distcc --version")
13280         if not output[0]:
13281                 print str(output[1].split("\n",1)[0]),
13282                 if "distcc" in settings.features:
13283                         print "[enabled]"
13284                 else:
13285                         print "[disabled]"
13286
13287         output=commands.getstatusoutput("ccache -V")
13288         if not output[0]:
13289                 print str(output[1].split("\n",1)[0]),
13290                 if "ccache" in settings.features:
13291                         print "[enabled]"
13292                 else:
13293                         print "[disabled]"
13294
13295         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13296                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13297         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13298         myvars  = portage.util.unique_array(myvars)
13299         myvars.sort()
13300
13301         for x in myvars:
13302                 if portage.isvalidatom(x):
13303                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13304                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13305                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13306                         pkgs = []
13307                         for pn, ver, rev in pkg_matches:
13308                                 if rev != "r0":
13309                                         pkgs.append(ver + "-" + rev)
13310                                 else:
13311                                         pkgs.append(ver)
13312                         if pkgs:
13313                                 pkgs = ", ".join(pkgs)
13314                                 print "%-20s %s" % (x+":", pkgs)
13315                 else:
13316                         print "%-20s %s" % (x+":", "[NOT VALID]")
13317
13318         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13319
13320         if "--verbose" in myopts:
13321                 myvars=settings.keys()
13322         else:
13323                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13324                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13325                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13326                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13327
13328                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13329
13330         myvars = portage.util.unique_array(myvars)
13331         unset_vars = []
13332         myvars.sort()
13333         for x in myvars:
13334                 if x in settings:
13335                         if x != "USE":
13336                                 print '%s="%s"' % (x, settings[x])
13337                         else:
13338                                 use = set(settings["USE"].split())
13339                                 use_expand = settings["USE_EXPAND"].split()
13340                                 use_expand.sort()
13341                                 for varname in use_expand:
13342                                         flag_prefix = varname.lower() + "_"
13343                                         for f in list(use):
13344                                                 if f.startswith(flag_prefix):
13345                                                         use.remove(f)
13346                                 use = list(use)
13347                                 use.sort()
13348                                 print 'USE="%s"' % " ".join(use),
13349                                 for varname in use_expand:
13350                                         myval = settings.get(varname)
13351                                         if myval:
13352                                                 print '%s="%s"' % (varname, myval),
13353                                 print
13354                 else:
13355                         unset_vars.append(x)
13356         if unset_vars:
13357                 print "Unset:  "+", ".join(unset_vars)
13358         print
13359
13360         if "--debug" in myopts:
13361                 for x in dir(portage):
13362                         module = getattr(portage, x)
13363                         if "cvs_id_string" in dir(module):
13364                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13365
13366         # See if we can find any packages installed matching the strings
13367         # passed on the command line
13368         mypkgs = []
13369         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13370         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13371         for x in myfiles:
13372                 mypkgs.extend(vardb.match(x))
13373
13374         # If some packages were found...
13375         if mypkgs:
13376                 # Get our global settings (we only print stuff if it varies from
13377                 # the current config)
13378                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13379                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13380                 global_vals = {}
13381                 pkgsettings = portage.config(clone=settings)
13382
13383                 for myvar in mydesiredvars:
13384                         global_vals[myvar] = set(settings.get(myvar, "").split())
13385
13386                 # Loop through each package
13387                 # Only print settings if they differ from global settings
13388                 header_title = "Package Settings"
13389                 print header_width * "="
13390                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13391                 print header_width * "="
13392                 from portage.output import EOutput
13393                 out = EOutput()
13394                 for pkg in mypkgs:
13395                         # Get all package specific variables
13396                         auxvalues = vardb.aux_get(pkg, auxkeys)
13397                         valuesmap = {}
13398                         for i in xrange(len(auxkeys)):
13399                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13400                         diff_values = {}
13401                         for myvar in mydesiredvars:
13402                                 # If the package variable doesn't match the
13403                                 # current global variable, something has changed,
13404                                 # so record it in diff_values so we know to print it
13405                                 if valuesmap[myvar] != global_vals[myvar]:
13406                                         diff_values[myvar] = valuesmap[myvar]
13407                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13408                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13409                         pkgsettings.reset()
13410                         # If a matching ebuild is no longer available in the tree, maybe it
13411                         # would make sense to compare against the flags for the best
13412                         # available version with the same slot?
13413                         mydb = None
13414                         if portdb.cpv_exists(pkg):
13415                                 mydb = portdb
13416                         pkgsettings.setcpv(pkg, mydb=mydb)
13417                         if valuesmap["IUSE"].intersection(
13418                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13419                                 diff_values["USE"] = valuesmap["USE"]
13420                         # If a difference was found, print the info for
13421                         # this package.
13422                         if diff_values:
13423                                 # Print package info
13424                                 print "%s was built with the following:" % pkg
13425                                 for myvar in mydesiredvars + ["USE"]:
13426                                         if myvar in diff_values:
13427                                                 mylist = list(diff_values[myvar])
13428                                                 mylist.sort()
13429                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13430                                 print
13431                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13432                         ebuildpath = vardb.findname(pkg)
13433                         if not ebuildpath or not os.path.exists(ebuildpath):
13434                                 out.ewarn("No ebuild found for '%s'" % pkg)
13435                                 continue
13436                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13437                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13438                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13439                                 tree="vartree")
13440
13441 def action_search(root_config, myopts, myfiles, spinner):
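        """
        Handle `emerge --search`: run each search term through the search
        class and print the matches, honoring --searchdesc, --quiet,
        --usepkg and --usepkgonly.
        """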
13442         if not myfiles:
13443                 print "emerge: no search terms provided."
13444         else:
13445                 searchinstance = search(root_config,
13446                         spinner, "--searchdesc" in myopts,
13447                         "--quiet" not in myopts, "--usepkg" in myopts,
13448                         "--usepkgonly" in myopts)
13449                 for mysearch in myfiles:
13450                         try:
13451                                 searchinstance.execute(mysearch)
13452                         except re.error, comment:
13453                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13454                                 sys.exit(1)
13455                         searchinstance.output()
13456
13457 def action_depclean(settings, trees, ldpath_mtimes,
13458         myopts, action, myfiles, spinner):
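        """
        Handle `emerge --depclean` and `emerge --prune`: build a dependency
        graph rooted at the system set and a temporary world set (adjusted so
        that removal candidates are excluded), then unmerge the installed
        packages that are not reachable from that graph.  Packages whose
        libraries still have consumers outside the removal set are kept.
        Returns 1 if the dependency calculation fails.
        """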
13459         # Remove packages that are neither explicitly merged nor required as a
13460         # dependency of another package. The world file counts as explicit.
13461
13462         # Global depclean or prune operations are not very safe when there are
13463         # missing dependencies since it's unknown how badly incomplete
13464         # the dependency graph is, and we might accidentally remove packages
13465         # that should have been pulled into the graph. On the other hand, it's
13466         # relatively safe to ignore missing deps when only asked to remove
13467         # specific packages.
13468         allow_missing_deps = len(myfiles) > 0
13469
13470         msg = []
13471         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13472         msg.append("mistakes. Packages that are part of the world set will always\n")
13473         msg.append("be kept.  They can be manually added to this set with\n")
13474         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13475         msg.append("package.provided (see portage(5)) will be removed by\n")
13476         msg.append("depclean, even if they are part of the world set.\n")
13477         msg.append("\n")
13478         msg.append("As a safety measure, depclean will not remove any packages\n")
13479         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13480         msg.append("consequence, it is often necessary to run %s\n" % \
13481                 good("`emerge --update"))
13482         msg.append(good("--newuse --deep @system @world`") + \
13483                 " prior to depclean.\n")
13484
13485         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13486                 portage.writemsg_stdout("\n")
13487                 for x in msg:
13488                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13489
13490         xterm_titles = "notitles" not in settings.features
13491         myroot = settings["ROOT"]
13492         root_config = trees[myroot]["root_config"]
13493         getSetAtoms = root_config.setconfig.getSetAtoms
13494         vardb = trees[myroot]["vartree"].dbapi
13495
13496         required_set_names = ("system", "world")
13497         required_sets = {}
13498         set_args = []
13499
13500         for s in required_set_names:
13501                 required_sets[s] = InternalPackageSet(
13502                         initial_atoms=getSetAtoms(s))
13503
13504
13505         # When removing packages, use a temporary version of world
13506         # which excludes packages that are intended to be eligible for
13507         # removal.
13508         world_temp_set = required_sets["world"]
13509         system_set = required_sets["system"]
13510
13511         if not system_set or not world_temp_set:
13512
13513                 if not system_set:
13514                         writemsg_level("!!! You have no system list.\n",
13515                                 level=logging.ERROR, noiselevel=-1)
13516
13517                 if not world_temp_set:
13518                         writemsg_level("!!! You have no world file.\n",
13519                                         level=logging.WARNING, noiselevel=-1)
13520
13521                 writemsg_level("!!! Proceeding is likely to " + \
13522                         "break your installation.\n",
13523                         level=logging.WARNING, noiselevel=-1)
13524                 if "--pretend" not in myopts:
13525                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13526
13527         if action == "depclean":
13528                 emergelog(xterm_titles, " >>> depclean")
13529
13530         import textwrap
13531         args_set = InternalPackageSet()
13532         if myfiles:
13533                 for x in myfiles:
13534                         if not is_valid_package_atom(x):
13535                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13536                                         level=logging.ERROR, noiselevel=-1)
13537                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13538                                 return
13539                         try:
13540                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13541                         except portage.exception.AmbiguousPackageName, e:
13542                                 msg = "The short ebuild name \"" + x + \
13543                                         "\" is ambiguous.  Please specify " + \
13544                                         "one of the following " + \
13545                                         "fully-qualified ebuild names instead:"
13546                                 for line in textwrap.wrap(msg, 70):
13547                                         writemsg_level("!!! %s\n" % (line,),
13548                                                 level=logging.ERROR, noiselevel=-1)
13549                                 for i in e[0]:
13550                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13551                                                 level=logging.ERROR, noiselevel=-1)
13552                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13553                                 return
13554                         args_set.add(atom)
13555                 matched_packages = False
13556                 for x in args_set:
13557                         if vardb.match(x):
13558                                 matched_packages = True
13559                                 break
13560                 if not matched_packages:
13561                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13562                                 action)
13563                         return
13564
13565         writemsg_level("\nCalculating dependencies  ")
13566         resolver_params = create_depgraph_params(myopts, "remove")
13567         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13568         vardb = resolver.trees[myroot]["vartree"].dbapi
13569
13570         if action == "depclean":
13571
13572                 if args_set:
13573                         # Pull in everything that's installed but not matched
13574                         # by an argument atom since we don't want to clean any
13575                         # package if something depends on it.
13576
13577                         world_temp_set.clear()
13578                         for pkg in vardb:
13579                                 spinner.update()
13580
13581                                 try:
13582                                         if args_set.findAtomForPackage(pkg) is None:
13583                                                 world_temp_set.add("=" + pkg.cpv)
13584                                                 continue
13585                                 except portage.exception.InvalidDependString, e:
13586                                         show_invalid_depstring_notice(pkg,
13587                                                 pkg.metadata["PROVIDE"], str(e))
13588                                         del e
13589                                         world_temp_set.add("=" + pkg.cpv)
13590                                         continue
13591
13592         elif action == "prune":
13593
13594                 # Pull in everything that's installed since we don't want
13595                 # to prune a package if something depends on it.
13596                 world_temp_set.clear()
13597                 world_temp_set.update(vardb.cp_all())
13598
13599                 if not args_set:
13600
13601                         # Try to prune everything that's slotted.
13602                         for cp in vardb.cp_all():
13603                                 if len(vardb.cp_list(cp)) > 1:
13604                                         args_set.add(cp)
13605
13606                 # Remove atoms from world that match installed packages
13607                 # that are also matched by argument atoms, but do not remove
13608                 # them if they match the highest installed version.
13609                 for pkg in vardb:
13610                         spinner.update()
13611                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13612                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13613                                 raise AssertionError("package expected in matches: " + \
13614                                         "cp = %s, cpv = %s matches = %s" % \
13615                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13616
13617                         highest_version = pkgs_for_cp[-1]
13618                         if pkg == highest_version:
13619                                 # pkg is the highest version
13620                                 world_temp_set.add("=" + pkg.cpv)
13621                                 continue
13622
13623                         if len(pkgs_for_cp) <= 1:
13624                                 raise AssertionError("more packages expected: " + \
13625                                         "cp = %s, cpv = %s matches = %s" % \
13626                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13627
13628                         try:
13629                                 if args_set.findAtomForPackage(pkg) is None:
13630                                         world_temp_set.add("=" + pkg.cpv)
13631                                         continue
13632                         except portage.exception.InvalidDependString, e:
13633                                 show_invalid_depstring_notice(pkg,
13634                                         pkg.metadata["PROVIDE"], str(e))
13635                                 del e
13636                                 world_temp_set.add("=" + pkg.cpv)
13637                                 continue
13638
13639         set_args = {}
13640         for s, package_set in required_sets.iteritems():
13641                 set_atom = SETPREFIX + s
13642                 set_arg = SetArg(arg=set_atom, set=package_set,
13643                         root_config=resolver.roots[myroot])
13644                 set_args[s] = set_arg
13645                 for atom in set_arg.set:
13646                         resolver._dep_stack.append(
13647                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13648                         resolver.digraph.add(set_arg, None)
13649
13650         success = resolver._complete_graph()
13651         writemsg_level("\b\b... done!\n")
13652
13653         resolver.display_problems()
13654
13655         if not success:
13656                 return 1
13657
13658         def unresolved_deps():
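                """
                Return True (after printing an error) when hard dependencies
                remain unresolved and missing dependencies are not being
                ignored; otherwise return False.
                """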
13659
13660                 unresolvable = set()
13661                 for dep in resolver._initially_unsatisfied_deps:
13662                         if isinstance(dep.parent, Package) and \
13663                                 (dep.priority > UnmergeDepPriority.SOFT):
13664                                 unresolvable.add((dep.atom, dep.parent.cpv))
13665
13666                 if not unresolvable:
13667                         return False
13668
13669                 if unresolvable and not allow_missing_deps:
13670                         prefix = bad(" * ")
13671                         msg = []
13672                         msg.append("Dependencies could not be completely resolved due to")
13673                         msg.append("the following required packages not being installed:")
13674                         msg.append("")
13675                         for atom, parent in unresolvable:
13676                                 msg.append("  %s pulled in by:" % (atom,))
13677                                 msg.append("    %s" % (parent,))
13678                                 msg.append("")
13679                         msg.append("Have you forgotten to run " + \
13680                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13681                         msg.append(("to %s? It may be necessary to manually " + \
13682                                 "uninstall packages that no longer") % action)
13683                         msg.append("exist in the portage tree since " + \
13684                                 "it may not be possible to satisfy their")
13685                         msg.append("dependencies.  Also, be aware of " + \
13686                                 "the --with-bdeps option that is documented")
13687                         msg.append("in " + good("`man emerge`") + ".")
13688                         if action == "prune":
13689                                 msg.append("")
13690                                 msg.append("If you would like to ignore " + \
13691                                         "dependencies then use %s." % good("--nodeps"))
13692                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13693                                 level=logging.ERROR, noiselevel=-1)
13694                         return True
13695                 return False
13696
13697         if unresolved_deps():
13698                 return 1
13699
13700         graph = resolver.digraph.copy()
13701         required_pkgs_total = 0
13702         for node in graph:
13703                 if isinstance(node, Package):
13704                         required_pkgs_total += 1
13705
13706         def show_parents(child_node):
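                """
                Print the packages in the graph that pull in child_node, or
                nothing if it has no parents (which can happen with --prune,
                since every installed package is pulled in).
                """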
13707                 parent_nodes = graph.parent_nodes(child_node)
13708                 if not parent_nodes:
13709                         # With --prune, the highest version can be pulled in without any
13710                         # real parent since all installed packages are pulled in.  In that
13711                         # case there's nothing to show here.
13712                         return
13713                 parent_strs = []
13714                 for node in parent_nodes:
13715                         parent_strs.append(str(getattr(node, "cpv", node)))
13716                 parent_strs.sort()
13717                 msg = []
13718                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13719                 for parent_str in parent_strs:
13720                         msg.append("    %s\n" % (parent_str,))
13721                 msg.append("\n")
13722                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13723
13724         def cmp_pkg_cpv(pkg1, pkg2):
13725                 """Sort Package instances by cpv."""
13726                 if pkg1.cpv > pkg2.cpv:
13727                         return 1
13728                 elif pkg1.cpv == pkg2.cpv:
13729                         return 0
13730                 else:
13731                         return -1
13732
13733         def create_cleanlist():
13734                 pkgs_to_remove = []
13735
13736                 if action == "depclean":
13737                         if args_set:
13738
13739                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13740                                         arg_atom = None
13741                                         try:
13742                                                 arg_atom = args_set.findAtomForPackage(pkg)
13743                                         except portage.exception.InvalidDependString:
13744                                                 # this error has already been displayed by now
13745                                                 continue
13746
13747                                         if arg_atom:
13748                                                 if pkg not in graph:
13749                                                         pkgs_to_remove.append(pkg)
13750                                                 elif "--verbose" in myopts:
13751                                                         show_parents(pkg)
13752
13753                         else:
13754                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13755                                         if pkg not in graph:
13756                                                 pkgs_to_remove.append(pkg)
13757                                         elif "--verbose" in myopts:
13758                                                 show_parents(pkg)
13759
13760                 elif action == "prune":
13761                         # Prune really uses all installed instead of world. It's not
13762                         # a real reverse dependency so don't display it as such.
13763                         graph.remove(set_args["world"])
13764
13765                         for atom in args_set:
13766                                 for pkg in vardb.match_pkgs(atom):
13767                                         if pkg not in graph:
13768                                                 pkgs_to_remove.append(pkg)
13769                                         elif "--verbose" in myopts:
13770                                                 show_parents(pkg)
13771
13772                 if not pkgs_to_remove:
13773                         writemsg_level(
13774                                 ">>> No packages selected for removal by %s\n" % action)
13775                         if "--verbose" not in myopts:
13776                                 writemsg_level(
13777                                         ">>> To see reverse dependencies, use %s\n" % \
13778                                                 good("--verbose"))
13779                         if action == "prune":
13780                                 writemsg_level(
13781                                         ">>> To ignore dependencies, use %s\n" % \
13782                                                 good("--nodeps"))
13783
13784                 return pkgs_to_remove
13785
13786         cleanlist = create_cleanlist()
13787
13788         if len(cleanlist):
13789                 clean_set = set(cleanlist)
13790
13791                 # Check if any of these packages are the sole providers of libraries
13792                 # with consumers that have not been selected for removal. If so, these
13793                 # packages and any dependencies need to be added to the graph.
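                #
                # Roughly: for each removal candidate, collect the library
                # objects it owns, look up their consumers via the linkage
                # map, discard consumers that belong to the candidate itself
                # or are also being removed, and skip libraries for which a
                # provider outside the removal set remains.  Anything left in
                # consumer_map afterwards means the candidate must be kept
                # and pulled back into the graph.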
13794                 real_vardb = trees[myroot]["vartree"].dbapi
13795                 linkmap = real_vardb.linkmap
13796                 liblist = linkmap.listLibraryObjects()
13797                 consumer_cache = {}
13798                 provider_cache = {}
13799                 soname_cache = {}
13800                 consumer_map = {}
13801
13802                 writemsg_level(">>> Checking for lib consumers...\n")
13803
13804                 for pkg in cleanlist:
13805                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13806                         provided_libs = set()
13807
13808                         for lib in liblist:
13809                                 if pkg_dblink.isowner(lib, myroot):
13810                                         provided_libs.add(lib)
13811
13812                         if not provided_libs:
13813                                 continue
13814
13815                         consumers = {}
13816                         for lib in provided_libs:
13817                                 lib_consumers = consumer_cache.get(lib)
13818                                 if lib_consumers is None:
13819                                         lib_consumers = linkmap.findConsumers(lib)
13820                                         consumer_cache[lib] = lib_consumers
13821                                 if lib_consumers:
13822                                         consumers[lib] = lib_consumers
13823
13824                         if not consumers:
13825                                 continue
13826
13827                         for lib, lib_consumers in consumers.items():
13828                                 for consumer_file in list(lib_consumers):
13829                                         if pkg_dblink.isowner(consumer_file, myroot):
13830                                                 lib_consumers.remove(consumer_file)
13831                                 if not lib_consumers:
13832                                         del consumers[lib]
13833
13834                         if not consumers:
13835                                 continue
13836
13837                         for lib, lib_consumers in consumers.iteritems():
13838
13839                                 soname = soname_cache.get(lib)
13840                                 if soname is None:
13841                                         soname = linkmap.getSoname(lib)
13842                                         soname_cache[lib] = soname
13843
13844                                 consumer_providers = []
13845                                 for lib_consumer in lib_consumers:
13846                                         providers = provider_cache.get(lib_consumer)
13847                                         if providers is None:
13848                                                 providers = linkmap.findProviders(lib_consumer)
13849                                                 provider_cache[lib_consumer] = providers
13850                                         if soname not in providers:
13851                                                 # Why does this happen?
13852                                                 continue
13853                                         consumer_providers.append(
13854                                                 (lib_consumer, providers[soname]))
13855
13856                                 consumers[lib] = consumer_providers
13857
13858                         consumer_map[pkg] = consumers
13859
13860                 if consumer_map:
13861
13862                         search_files = set()
13863                         for consumers in consumer_map.itervalues():
13864                                 for lib, consumer_providers in consumers.iteritems():
13865                                         for lib_consumer, providers in consumer_providers:
13866                                                 search_files.add(lib_consumer)
13867                                                 search_files.update(providers)
13868
13869                         writemsg_level(">>> Assigning files to packages...\n")
13870                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13871
13872                         for pkg, consumers in consumer_map.items():
13873                                 for lib, consumer_providers in consumers.items():
13874                                         lib_consumers = set()
13875
13876                                         for lib_consumer, providers in consumer_providers:
13877                                                 owner_set = file_owners.get(lib_consumer)
13878                                                 provider_dblinks = set()
13879                                                 provider_pkgs = set()
13880
13881                                                 if len(providers) > 1:
13882                                                         for provider in providers:
13883                                                                 provider_set = file_owners.get(provider)
13884                                                                 if provider_set is not None:
13885                                                                         provider_dblinks.update(provider_set)
13886
13887                                                 if len(provider_dblinks) > 1:
13888                                                         for provider_dblink in provider_dblinks:
13889                                                                 pkg_key = ("installed", myroot,
13890                                                                         provider_dblink.mycpv, "nomerge")
13891                                                                 if pkg_key not in clean_set:
13892                                                                         provider_pkgs.add(vardb.get(pkg_key))
13893
13894                                                 if provider_pkgs:
13895                                                         continue
13896
13897                                                 if owner_set is not None:
13898                                                         lib_consumers.update(owner_set)
13899
13900                                         for consumer_dblink in list(lib_consumers):
13901                                                 if ("installed", myroot, consumer_dblink.mycpv,
13902                                                         "nomerge") in clean_set:
13903                                                         lib_consumers.remove(consumer_dblink)
13904                                                         continue
13905
13906                                         if lib_consumers:
13907                                                 consumers[lib] = lib_consumers
13908                                         else:
13909                                                 del consumers[lib]
13910                                 if not consumers:
13911                                         del consumer_map[pkg]
13912
13913                 if consumer_map:
13914                         # TODO: Implement a package set for rebuilding consumer packages.
13915
13916                         msg = "In order to avoid breakage of link level " + \
13917                                 "dependencies, one or more packages will not be removed. " + \
13918                                 "This can be solved by rebuilding " + \
13919                                 "the packages that pulled them in."
13920
13921                         prefix = bad(" * ")
13922                         from textwrap import wrap
13923                         writemsg_level("".join(prefix + "%s\n" % line for \
13924                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13925
13926                         msg = []
13927                         for pkg, consumers in consumer_map.iteritems():
13928                                 unique_consumers = set(chain(*consumers.values()))
13929                                 unique_consumers = sorted(consumer.mycpv \
13930                                         for consumer in unique_consumers)
13931                                 msg.append("")
13932                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13933                                 for consumer in unique_consumers:
13934                                         msg.append("    %s" % (consumer,))
13935                         msg.append("")
13936                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13937                                 level=logging.WARNING, noiselevel=-1)
13938
13939                         # Add lib providers to the graph as children of lib consumers,
13940                         # and also add any dependencies pulled in by the provider.
13941                         writemsg_level(">>> Adding lib providers to graph...\n")
13942
13943                         for pkg, consumers in consumer_map.iteritems():
13944                                 for consumer_dblink in set(chain(*consumers.values())):
13945                                         consumer_pkg = vardb.get(("installed", myroot,
13946                                                 consumer_dblink.mycpv, "nomerge"))
13947                                         if not resolver._add_pkg(pkg,
13948                                                 Dependency(parent=consumer_pkg,
13949                                                 priority=UnmergeDepPriority(runtime=True),
13950                                                 root=pkg.root)):
13951                                                 resolver.display_problems()
13952                                                 return 1
13953
13954                         writemsg_level("\nCalculating dependencies  ")
13955                         success = resolver._complete_graph()
13956                         writemsg_level("\b\b... done!\n")
13957                         resolver.display_problems()
13958                         if not success:
13959                                 return 1
13960                         if unresolved_deps():
13961                                 return 1
13962
13963                         graph = resolver.digraph.copy()
13964                         required_pkgs_total = 0
13965                         for node in graph:
13966                                 if isinstance(node, Package):
13967                                         required_pkgs_total += 1
13968                         cleanlist = create_cleanlist()
13969                         if not cleanlist:
13970                                 return 0
13971                         clean_set = set(cleanlist)
13972
13973                 # Use a topological sort to create an unmerge order such that
13974                 # each package is unmerged before its dependencies. This is
13975                 # necessary to avoid breaking things that may need to run
13976                 # during pkg_prerm or pkg_postrm phases.
13977
13978                 # Create a new graph to account for dependencies between the
13979                 # packages being unmerged.
13980                 graph = digraph()
13981                 del cleanlist[:]
13982
13983                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13984                 runtime = UnmergeDepPriority(runtime=True)
13985                 runtime_post = UnmergeDepPriority(runtime_post=True)
13986                 buildtime = UnmergeDepPriority(buildtime=True)
13987                 priority_map = {
13988                         "RDEPEND": runtime,
13989                         "PDEPEND": runtime_post,
13990                         "DEPEND": buildtime,
13991                 }
13992
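                # For example, if package A (being unmerged) has an RDEPEND
                # atom matched by package B (also being unmerged), then B is
                # added below as a child of A, making A a root node of this
                # graph, so A is unmerged before B.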
13993                 for node in clean_set:
13994                         graph.add(node, None)
13995                         mydeps = []
13996                         node_use = node.metadata["USE"].split()
13997                         for dep_type in dep_keys:
13998                                 depstr = node.metadata[dep_type]
13999                                 if not depstr:
14000                                         continue
14001                                 try:
14002                                         portage.dep._dep_check_strict = False
14003                                         success, atoms = portage.dep_check(depstr, None, settings,
14004                                                 myuse=node_use, trees=resolver._graph_trees,
14005                                                 myroot=myroot)
14006                                 finally:
14007                                         portage.dep._dep_check_strict = True
14008                                 if not success:
14009                                         # Ignore invalid deps of packages that will
14010                                         # be uninstalled anyway.
14011                                         continue
14012
14013                                 priority = priority_map[dep_type]
14014                                 for atom in atoms:
14015                                         if not isinstance(atom, portage.dep.Atom):
14016                                                 # Ignore invalid atoms returned from dep_check().
14017                                                 continue
14018                                         if atom.blocker:
14019                                                 continue
14020                                         matches = vardb.match_pkgs(atom)
14021                                         if not matches:
14022                                                 continue
14023                                         for child_node in matches:
14024                                                 if child_node in clean_set:
14025                                                         graph.add(child_node, node, priority=priority)
14026
14027                 ordered = True
14028                 if len(graph.order) == len(graph.root_nodes()):
14029                         # If there are no dependencies between packages
14030                         # let unmerge() group them by cat/pn.
14031                         ordered = False
14032                         cleanlist = [pkg.cpv for pkg in graph.order]
14033                 else:
14034                         # Order nodes from lowest to highest overall reference count for
14035                         # optimal root node selection.
14036                         node_refcounts = {}
14037                         for node in graph.order:
14038                                 node_refcounts[node] = len(graph.parent_nodes(node))
14039                         def cmp_reference_count(node1, node2):
14040                                 return node_refcounts[node1] - node_refcounts[node2]
14041                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14042
14043                         ignore_priority_range = [None]
14044                         ignore_priority_range.extend(
14045                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14046                         while not graph.empty():
14047                                 for ignore_priority in ignore_priority_range:
14048                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14049                                         if nodes:
14050                                                 break
14051                                 if not nodes:
14052                                         raise AssertionError("no root nodes")
14053                                 if ignore_priority is not None:
14054                                         # Some deps have been dropped due to circular dependencies,
14055                                         # so only pop one node in order to minimize the number that
14056                                         # are dropped.
14057                                         del nodes[1:]
14058                                 for node in nodes:
14059                                         graph.remove(node)
14060                                         cleanlist.append(node.cpv)
14061
14062                 unmerge(root_config, myopts, "unmerge", cleanlist,
14063                         ldpath_mtimes, ordered=ordered)
14064
14065         if action == "prune":
14066                 return
14067
14068         if not cleanlist and "--quiet" in myopts:
14069                 return
14070
14071         print "Packages installed:   "+str(len(vardb.cpv_all()))
14072         print "Packages in world:    " + \
14073                 str(len(root_config.sets["world"].getAtoms()))
14074         print "Packages in system:   " + \
14075                 str(len(root_config.sets["system"].getAtoms()))
14076         print "Required packages:    "+str(required_pkgs_total)
14077         if "--pretend" in myopts:
14078                 print "Number to remove:     "+str(len(cleanlist))
14079         else:
14080                 print "Number removed:       "+str(len(cleanlist))
14081
14082 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14083         """
14084         Construct a depgraph for the given resume list. This will raise
14085         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14086         @rtype: tuple
14087         @returns: (success, depgraph, dropped_tasks)
14088         """
14089         skip_masked = True
14090         skip_unsatisfied = True
14091         mergelist = mtimedb["resume"]["mergelist"]
14092         dropped_tasks = set()
14093         while True:
14094                 mydepgraph = depgraph(settings, trees,
14095                         myopts, myparams, spinner)
14096                 try:
14097                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14098                                 skip_masked=skip_masked)
14099                 except depgraph.UnsatisfiedResumeDep, e:
14100                         if not skip_unsatisfied:
14101                                 raise
14102
14103                         graph = mydepgraph.digraph
14104                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14105                                 for dep in e.value)
14106                         traversed_nodes = set()
14107                         unsatisfied_stack = list(unsatisfied_parents)
14108                         while unsatisfied_stack:
14109                                 pkg = unsatisfied_stack.pop()
14110                                 if pkg in traversed_nodes:
14111                                         continue
14112                                 traversed_nodes.add(pkg)
14113
14114                                 # If this package was pulled in by a parent
14115                                 # package scheduled for merge, removing this
14116                                 # package may cause the parent package's
14117                                 # dependency to become unsatisfied.
14118                                 for parent_node in graph.parent_nodes(pkg):
14119                                         if not isinstance(parent_node, Package) \
14120                                                 or parent_node.operation not in ("merge", "nomerge"):
14121                                                 continue
14122                                         unsatisfied = \
14123                                                 graph.child_nodes(parent_node,
14124                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14125                                         if pkg in unsatisfied:
14126                                                 unsatisfied_parents[parent_node] = parent_node
14127                                                 unsatisfied_stack.append(parent_node)
14128
14129                         pruned_mergelist = []
14130                         for x in mergelist:
14131                                 if isinstance(x, list) and \
14132                                         tuple(x) not in unsatisfied_parents:
14133                                         pruned_mergelist.append(x)
14134
14135                         # If the mergelist doesn't shrink then this loop is infinite.
14136                         if len(pruned_mergelist) == len(mergelist):
14137                                 # This happens if a package can't be dropped because
14138                                 # it's already installed, but it has unsatisfied PDEPEND.
14139                                 raise
14140                         mergelist[:] = pruned_mergelist
14141
14142                         # Exclude installed packages that have been removed from the graph due
14143                         # to failure to build/install runtime dependencies after the dependent
14144                         # package has already been installed.
14145                         dropped_tasks.update(pkg for pkg in \
14146                                 unsatisfied_parents if pkg.operation != "nomerge")
14147                         mydepgraph.break_refs(unsatisfied_parents)
14148
14149                         del e, graph, traversed_nodes, \
14150                                 unsatisfied_parents, unsatisfied_stack
14151                         continue
14152                 else:
14153                         break
14154         return (success, mydepgraph, dropped_tasks)
14155
14156 def action_build(settings, trees, mtimedb,
14157         myopts, myaction, myfiles, spinner):
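        """
        Handle the default merge action, including --resume: validate any
        saved resume data in mtimedb, re-apply the options of the resumed
        command, build (or resume) the dependency graph, and then display
        and carry out the resulting merge list according to the given
        options.
        """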
14158
14159         # validate the state of the resume data
14160         # so that we can make assumptions later.
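        #
        # The resume data is expected to look roughly like this (an
        # illustrative sketch derived from the checks below; actual values
        # will differ):
        #
        #   mtimedb["resume"] = {
        #       "mergelist": [[pkg_type, pkg_root, pkg_key, pkg_action], ...],
        #       "myopts":    {"--deep": True, ...} or ["--deep", ...],
        #       "favorites": ["app-misc/foo", ...],
        #   }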
14161         for k in ("resume", "resume_backup"):
14162                 if k not in mtimedb:
14163                         continue
14164                 resume_data = mtimedb[k]
14165                 if not isinstance(resume_data, dict):
14166                         del mtimedb[k]
14167                         continue
14168                 mergelist = resume_data.get("mergelist")
14169                 if not isinstance(mergelist, list):
14170                         del mtimedb[k]
14171                         continue
14172                 for x in mergelist:
14173                         if not (isinstance(x, list) and len(x) == 4):
14174                                 continue
14175                         pkg_type, pkg_root, pkg_key, pkg_action = x
14176                         if pkg_root not in trees:
14177                                 # Current $ROOT setting differs,
14178                                 # so the list must be stale.
14179                                 mergelist = None
14180                                 break
14181                 if not mergelist:
14182                         del mtimedb[k]
14183                         continue
14184                 resume_opts = resume_data.get("myopts")
14185                 if not isinstance(resume_opts, (dict, list)):
14186                         del mtimedb[k]
14187                         continue
14188                 favorites = resume_data.get("favorites")
14189                 if not isinstance(favorites, list):
14190                         del mtimedb[k]
14191                         continue
14192
14193         resume = False
14194         if "--resume" in myopts and \
14195                 ("resume" in mtimedb or
14196                 "resume_backup" in mtimedb):
14197                 resume = True
14198                 if "resume" not in mtimedb:
14199                         mtimedb["resume"] = mtimedb["resume_backup"]
14200                         del mtimedb["resume_backup"]
14201                         mtimedb.commit()
14202                 # "myopts" is a list for backward compatibility.
14203                 resume_opts = mtimedb["resume"].get("myopts", [])
14204                 if isinstance(resume_opts, list):
14205                         resume_opts = dict((k,True) for k in resume_opts)
14206                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14207                         resume_opts.pop(opt, None)
14208                 myopts.update(resume_opts)
14209
14210                 if "--debug" in myopts:
14211                         writemsg_level("myopts %s\n" % (myopts,))
14212
14213                 # Adjust config according to options of the command being resumed.
14214                 for myroot in trees:
14215                         mysettings =  trees[myroot]["vartree"].settings
14216                         mysettings.unlock()
14217                         adjust_config(myopts, mysettings)
14218                         mysettings.lock()
14219                         del myroot, mysettings
14220
14221         ldpath_mtimes = mtimedb["ldpath"]
14222         favorites=[]
14223         merge_count = 0
14224         buildpkgonly = "--buildpkgonly" in myopts
14225         pretend = "--pretend" in myopts
14226         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14227         ask = "--ask" in myopts
14228         nodeps = "--nodeps" in myopts
14229         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14230         tree = "--tree" in myopts
14231         if nodeps and tree:
14232                 tree = False
14233                 del myopts["--tree"]
14234                 portage.writemsg(colorize("WARN", " * ") + \
14235                         "--tree is broken with --nodeps. Disabling...\n")
14236         debug = "--debug" in myopts
14237         verbose = "--verbose" in myopts
14238         quiet = "--quiet" in myopts
14239         if pretend or fetchonly:
14240                 # make the mtimedb readonly
14241                 mtimedb.filename = None
14242         if "--digest" in myopts:
14243                 msg = "The --digest option can prevent corruption from being" + \
14244                         " noticed. The `repoman manifest` command is the preferred" + \
14245                         " way to generate manifests and it is capable of doing an" + \
14246                         " entire repository or category at once."
14247                 prefix = bad(" * ")
14248                 writemsg(prefix + "\n")
14249                 from textwrap import wrap
14250                 for line in wrap(msg, 72):
14251                         writemsg("%s%s\n" % (prefix, line))
14252                 writemsg(prefix + "\n")
14253
14254         if "--quiet" not in myopts and \
14255                 ("--pretend" in myopts or "--ask" in myopts or \
14256                 "--tree" in myopts or "--verbose" in myopts):
14257                 action = ""
14258                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14259                         action = "fetched"
14260                 elif "--buildpkgonly" in myopts:
14261                         action = "built"
14262                 else:
14263                         action = "merged"
14264                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14265                         print
14266                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14267                         print
14268                 else:
14269                         print
14270                         print darkgreen("These are the packages that would be %s, in order:") % action
14271                         print
14272
14273         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14274         if not show_spinner:
14275                 spinner.update = spinner.update_quiet
14276
14277         if resume:
14278                 favorites = mtimedb["resume"].get("favorites")
14279                 if not isinstance(favorites, list):
14280                         favorites = []
14281
14282                 if show_spinner:
14283                         print "Calculating dependencies  ",
14284                 myparams = create_depgraph_params(myopts, myaction)
14285
14286                 resume_data = mtimedb["resume"]
14287                 mergelist = resume_data["mergelist"]
14288                 if mergelist and "--skipfirst" in myopts:
14289                         for i, task in enumerate(mergelist):
14290                                 if isinstance(task, list) and \
14291                                         task and task[-1] == "merge":
14292                                         del mergelist[i]
14293                                         break
14294
14295                 success = False
14296                 mydepgraph = None
14297                 try:
14298                         success, mydepgraph, dropped_tasks = resume_depgraph(
14299                                 settings, trees, mtimedb, myopts, myparams, spinner)
14300                 except (portage.exception.PackageNotFound,
14301                         depgraph.UnsatisfiedResumeDep), e:
14302                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14303                                 mydepgraph = e.depgraph
14304                         if show_spinner:
14305                                 print
14306                         from textwrap import wrap
14307                         from portage.output import EOutput
14308                         out = EOutput()
14309
14310                         resume_data = mtimedb["resume"]
14311                         mergelist = resume_data.get("mergelist")
14312                         if not isinstance(mergelist, list):
14313                                 mergelist = []
14314                         if mergelist and (debug or (verbose and not quiet)):
14315                                 out.eerror("Invalid resume list:")
14316                                 out.eerror("")
14317                                 indent = "  "
14318                                 for task in mergelist:
14319                                         if isinstance(task, list):
14320                                                 out.eerror(indent + str(tuple(task)))
14321                                 out.eerror("")
14322
14323                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14324                                 out.eerror("One or more packages are either masked or " + \
14325                                         "have missing dependencies:")
14326                                 out.eerror("")
14327                                 indent = "  "
14328                                 for dep in e.value:
14329                                         if dep.atom is None:
14330                                                 out.eerror(indent + "Masked package:")
14331                                                 out.eerror(2 * indent + str(dep.parent))
14332                                                 out.eerror("")
14333                                         else:
14334                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14335                                                 out.eerror(2 * indent + str(dep.parent))
14336                                                 out.eerror("")
14337                                 msg = "The resume list contains packages " + \
14338                                         "that are either masked or have " + \
14339                                         "unsatisfied dependencies. " + \
14340                                         "Please restart/continue " + \
14341                                         "the operation manually, or use --skipfirst " + \
14342                                         "to skip the first package in the list and " + \
14343                                         "any other packages that may be " + \
14344                                         "masked or have missing dependencies."
14345                                 for line in wrap(msg, 72):
14346                                         out.eerror(line)
14347                         elif isinstance(e, portage.exception.PackageNotFound):
14348                                 out.eerror("An expected package is " + \
14349                                         "not available: %s" % str(e))
14350                                 out.eerror("")
14351                                 msg = "The resume list contains one or more " + \
14352                                         "packages that are no longer " + \
14353                                         "available. Please restart/continue " + \
14354                                         "the operation manually."
14355                                 for line in wrap(msg, 72):
14356                                         out.eerror(line)
14357                 else:
14358                         if show_spinner:
14359                                 print "\b\b... done!"
14360
14361                 if success:
14362                         if dropped_tasks:
14363                                 portage.writemsg("!!! One or more packages have been " + \
14364                                         "dropped due to\n" + \
14365                                         "!!! masking or unsatisfied dependencies:\n\n",
14366                                         noiselevel=-1)
14367                                 for task in dropped_tasks:
14368                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14369                                 portage.writemsg("\n", noiselevel=-1)
14370                         del dropped_tasks
14371                 else:
14372                         if mydepgraph is not None:
14373                                 mydepgraph.display_problems()
14374                         if not (ask or pretend):
14375                                 # delete the current list and also the backup
14376                                 # since it's probably stale too.
14377                                 for k in ("resume", "resume_backup"):
14378                                         mtimedb.pop(k, None)
14379                                 mtimedb.commit()
14380
14381                         return 1
14382         else:
14383                 if ("--resume" in myopts):
14384                         print darkgreen("emerge: It seems we have nothing to resume...")
14385                         return os.EX_OK
14386
14387                 myparams = create_depgraph_params(myopts, myaction)
14388                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14389                         print "Calculating dependencies  ",
14390                         sys.stdout.flush()
14391                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14392                 try:
14393                         retval, favorites = mydepgraph.select_files(myfiles)
14394                 except portage.exception.PackageNotFound, e:
14395                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14396                         return 1
14397                 except portage.exception.PackageSetNotFound, e:
14398                         root_config = trees[settings["ROOT"]]["root_config"]
14399                         display_missing_pkg_set(root_config, e.value)
14400                         return 1
14401                 if show_spinner:
14402                         print "\b\b... done!"
14403                 if not retval:
14404                         mydepgraph.display_problems()
14405                         return 1
14406
14407         if "--pretend" not in myopts and \
14408                 ("--ask" in myopts or "--tree" in myopts or \
14409                 "--verbose" in myopts) and \
14410                 not ("--quiet" in myopts and "--ask" not in myopts):
14411                 if "--resume" in myopts:
14412                         mymergelist = mydepgraph.altlist()
14413                         if len(mymergelist) == 0:
14414                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14415                                 return os.EX_OK
14416                         favorites = mtimedb["resume"]["favorites"]
14417                         retval = mydepgraph.display(
14418                                 mydepgraph.altlist(reversed=tree),
14419                                 favorites=favorites)
14420                         mydepgraph.display_problems()
14421                         if retval != os.EX_OK:
14422                                 return retval
14423                         prompt="Would you like to resume merging these packages?"
14424                 else:
14425                         retval = mydepgraph.display(
14426                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14427                                 favorites=favorites)
14428                         mydepgraph.display_problems()
14429                         if retval != os.EX_OK:
14430                                 return retval
14431                         mergecount=0
14432                         for x in mydepgraph.altlist():
14433                                 if isinstance(x, Package) and x.operation == "merge":
14434                                         mergecount += 1
14435
14436                         if mergecount==0:
14437                                 sets = trees[settings["ROOT"]]["root_config"].sets
14438                                 world_candidates = None
14439                                 if "--noreplace" in myopts and \
14440                                         not oneshot and favorites:
14441                                         # Sets that are not world candidates are filtered
14442                                         # out here since the favorites list needs to be
14443                                         # complete for depgraph.loadResumeCommand() to
14444                                         # operate correctly.
14445                                         world_candidates = [x for x in favorites \
14446                                                 if not (x.startswith(SETPREFIX) and \
14447                                                 not sets[x[1:]].world_candidate)]
14448                                 if "--noreplace" in myopts and \
14449                                         not oneshot and world_candidates:
14450                                         print
14451                                         for x in world_candidates:
14452                                                 print " %s %s" % (good("*"), x)
14453                                         prompt="Would you like to add these packages to your world favorites?"
14454                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14455                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14456                                 else:
14457                                         print
14458                                         print "Nothing to merge; quitting."
14459                                         print
14460                                         return os.EX_OK
14461                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14462                                 prompt="Would you like to fetch the source files for these packages?"
14463                         else:
14464                                 prompt="Would you like to merge these packages?"
14465                 print
14466                 if "--ask" in myopts and userquery(prompt) == "No":
14467                         print
14468                         print "Quitting."
14469                         print
14470                         return os.EX_OK
14471                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14472                 myopts.pop("--ask", None)
14473
14474         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14475                 if ("--resume" in myopts):
14476                         mymergelist = mydepgraph.altlist()
14477                         if len(mymergelist) == 0:
14478                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14479                                 return os.EX_OK
14480                         favorites = mtimedb["resume"]["favorites"]
14481                         retval = mydepgraph.display(
14482                                 mydepgraph.altlist(reversed=tree),
14483                                 favorites=favorites)
14484                         mydepgraph.display_problems()
14485                         if retval != os.EX_OK:
14486                                 return retval
14487                 else:
14488                         retval = mydepgraph.display(
14489                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14490                                 favorites=favorites)
14491                         mydepgraph.display_problems()
14492                         if retval != os.EX_OK:
14493                                 return retval
14494                         if "--buildpkgonly" in myopts:
14495                                 graph_copy = mydepgraph.digraph.clone()
14496                                 removed_nodes = set()
14497                                 for node in list(graph_copy.order):
14498                                         if not isinstance(node, Package) or \
14499                                                 node.operation == "nomerge":
14500                                                 removed_nodes.add(node)
14501                                 graph_copy.difference_update(removed_nodes)
14502                                 if not graph_copy.hasallzeros(ignore_priority = \
14503                                         DepPrioritySatisfiedRange.ignore_medium):
14504                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14505                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14506                                         return 1
14507         else:
14508                 if "--buildpkgonly" in myopts:
14509                         graph_copy = mydepgraph.digraph.clone()
14510                         removed_nodes = set()
14511                         for node in list(graph_copy.order):
14512                                 if not isinstance(node, Package) or \
14513                                         node.operation == "nomerge":
14514                                         removed_nodes.add(node)
14515                         graph_copy.difference_update(removed_nodes)
14516                         if not graph_copy.hasallzeros(ignore_priority = \
14517                                 DepPrioritySatisfiedRange.ignore_medium):
14518                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14519                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14520                                 return 1
14521
14522                 if ("--resume" in myopts):
14523                         favorites=mtimedb["resume"]["favorites"]
14524                         mymergelist = mydepgraph.altlist()
14525                         mydepgraph.break_refs(mymergelist)
14526                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14527                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14528                         del mydepgraph, mymergelist
14529                         clear_caches(trees)
14530
14531                         retval = mergetask.merge()
14532                         merge_count = mergetask.curval
14533                 else:
14534                         if "resume" in mtimedb and \
14535                         "mergelist" in mtimedb["resume"] and \
14536                         len(mtimedb["resume"]["mergelist"]) > 1:
14537                                 mtimedb["resume_backup"] = mtimedb["resume"]
14538                                 del mtimedb["resume"]
14539                                 mtimedb.commit()
14540                         mtimedb["resume"]={}
14541                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14542                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14543                         # a list type for options.
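                        # Illustrative example of the two formats (values are
                        # True for plain flags, or the parsed argument value):
                        #   dict form: {"--deep": True, "--jobs": 4}
                        #   list form: ["--deep", "--verbose"]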
14544                         mtimedb["resume"]["myopts"] = myopts.copy()
14545
14546                         # Convert Atom instances to plain str.
14547                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14548
14549                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14550                                 for pkgline in mydepgraph.altlist():
14551                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14552                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14553                                                 tmpsettings = portage.config(clone=settings)
14554                                                 edebug = 0
14555                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
14556                                                         edebug = 1
14557                                                 retval = portage.doebuild(
14558                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
14559                                                         ("--pretend" in myopts),
14560                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14561                                                         tree="porttree")
14562
14563                         pkglist = mydepgraph.altlist()
14564                         mydepgraph.saveNomergeFavorites()
14565                         mydepgraph.break_refs(pkglist)
14566                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14567                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14568                         del mydepgraph, pkglist
14569                         clear_caches(trees)
14570
14571                         retval = mergetask.merge()
14572                         merge_count = mergetask.curval
14573
14574                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14575                         if "yes" == settings.get("AUTOCLEAN"):
14576                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14577                                 unmerge(trees[settings["ROOT"]]["root_config"],
14578                                         myopts, "clean", [],
14579                                         ldpath_mtimes, autoclean=1)
14580                         else:
14581                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14582                                         + " AUTOCLEAN is disabled.  This can cause serious"
14583                                         + " problems due to overlapping packages.\n")
14584                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14585
14586                 return retval
14587
14588 def multiple_actions(action1, action2):
14589         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14590         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14591         sys.exit(1)
14592
14593 def insert_optional_args(args):
14594         """
14595         Parse optional arguments and insert a value if one has
14596         not been provided. This is done before feeding the args
14597         to the optparse parser since that parser does not support
14598         this feature natively.
14599         """
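        # A few illustrative transformations performed by this pre-parse step
        # (a sketch, not an exhaustive specification):
        #   ["-j", "world"]      -> ["--jobs", "True", "world"]
        #   ["-j4", "world"]     -> ["--jobs", "4", "world"]
        #   ["-j", "4", "world"] -> ["--jobs", "4", "world"]
        #   ["-atj"]             -> ["--jobs", "True", "-at"]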
14600
14601         new_args = []
14602         jobs_opts = ("-j", "--jobs")
14603         arg_stack = args[:]
14604         arg_stack.reverse()
14605         while arg_stack:
14606                 arg = arg_stack.pop()
14607
14608                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14609                 if not (short_job_opt or arg in jobs_opts):
14610                         new_args.append(arg)
14611                         continue
14612
14613                 # Insert a placeholder value so that optparse
14614                 # always receives an argument for --jobs.
14615
14616                 new_args.append("--jobs")
14617                 job_count = None
14618                 saved_opts = None
14619                 if short_job_opt and len(arg) > 2:
14620                         if arg[:2] == "-j":
14621                                 try:
14622                                         job_count = int(arg[2:])
14623                                 except ValueError:
14624                                         saved_opts = arg[2:]
14625                         else:
14626                                 job_count = "True"
14627                                 saved_opts = arg[1:].replace("j", "")
14628
14629                 if job_count is None and arg_stack:
14630                         try:
14631                                 job_count = int(arg_stack[-1])
14632                         except ValueError:
14633                                 pass
14634                         else:
14635                                 # Discard the job count from the stack
14636                                 # since we're consuming it here.
14637                                 arg_stack.pop()
14638
14639                 if job_count is None:
14640                         # unlimited number of jobs
14641                         new_args.append("True")
14642                 else:
14643                         new_args.append(str(job_count))
14644
14645                 if saved_opts is not None:
14646                         new_args.append("-" + saved_opts)
14647
14648         return new_args
14649
14650 def parse_opts(tmpcmdline, silent=False):
14651         myaction=None
14652         myopts = {}
14653         myfiles=[]
14654
14655         global actions, options, shortmapping
14656
14657         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14658         argument_options = {
14659                 "--config-root": {
14660                         "help":"specify the location for portage configuration files",
14661                         "action":"store"
14662                 },
14663                 "--color": {
14664                         "help":"enable or disable color output",
14665                         "type":"choice",
14666                         "choices":("y", "n")
14667                 },
14668
14669                 "--jobs": {
14670
14671                         "help"   : "Specifies the number of packages to build " + \
14672                                 "simultaneously.",
14673
14674                         "action" : "store"
14675                 },
14676
14677                 "--load-average": {
14678
14679                         "help"   :"Specifies that no new builds should be started " + \
14680                                 "if there are other builds running and the load average " + \
14681                                 "is at least LOAD (a floating-point number).",
14682
14683                         "action" : "store"
14684                 },
14685
14686                 "--with-bdeps": {
14687                         "help":"include unnecessary build time dependencies",
14688                         "type":"choice",
14689                         "choices":("y", "n")
14690                 },
14691                 "--reinstall": {
14692                         "help":"specify conditions to trigger package reinstallation",
14693                         "type":"choice",
14694                         "choices":["changed-use"]
14695                 }
14696         }
14697
14698         from optparse import OptionParser
14699         parser = OptionParser()
14700         if parser.has_option("--help"):
14701                 parser.remove_option("--help")
14702
14703         for action_opt in actions:
14704                 parser.add_option("--" + action_opt, action="store_true",
14705                         dest=action_opt.replace("-", "_"), default=False)
14706         for myopt in options:
14707                 parser.add_option(myopt, action="store_true",
14708                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14709         for shortopt, longopt in shortmapping.iteritems():
14710                 parser.add_option("-" + shortopt, action="store_true",
14711                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14712         for myalias, myopt in longopt_aliases.iteritems():
14713                 parser.add_option(myalias, action="store_true",
14714                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14715
14716         for myopt, kwargs in argument_options.iteritems():
14717                 parser.add_option(myopt,
14718                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14719
14720         tmpcmdline = insert_optional_args(tmpcmdline)
14721
14722         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14723
14724         if myoptions.jobs:
14725                 jobs = None
14726                 if myoptions.jobs == "True":
14727                         jobs = True
14728                 else:
14729                         try:
14730                                 jobs = int(myoptions.jobs)
14731                         except ValueError:
14732                                 jobs = -1
14733
14734                 if jobs is not True and \
14735                         jobs < 1:
14736                         jobs = None
14737                         if not silent:
14738                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14739                                         (myoptions.jobs,), noiselevel=-1)
14740
14741                 myoptions.jobs = jobs
14742
14743         if myoptions.load_average:
14744                 try:
14745                         load_average = float(myoptions.load_average)
14746                 except ValueError:
14747                         load_average = 0.0
14748
14749                 if load_average <= 0.0:
14750                         load_average = None
14751                         if not silent:
14752                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14753                                         (myoptions.load_average,), noiselevel=-1)
14754
14755                 myoptions.load_average = load_average
14756
14757         for myopt in options:
14758                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14759                 if v:
14760                         myopts[myopt] = True
14761
14762         for myopt in argument_options:
14763                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14764                 if v is not None:
14765                         myopts[myopt] = v
14766
14767         for action_opt in actions:
14768                 v = getattr(myoptions, action_opt.replace("-", "_"))
14769                 if v:
14770                         if myaction:
14771                                 multiple_actions(myaction, action_opt)
14772                                 sys.exit(1)
14773                         myaction = action_opt
14774
14775         myfiles += myargs
14776
14777         return myaction, myopts, myfiles
14778
14779 def validate_ebuild_environment(trees):
14780         for myroot in trees:
14781                 settings = trees[myroot]["vartree"].settings
14782                 settings.validate()
14783
14784 def clear_caches(trees):
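        # Discard memoized and cached state that is no longer needed once the
        # depgraph has been handed off: melt() reverses the earlier dbapi
        # freeze(), the aux/binary caches and dircache are emptied, and
        # gc.collect() reclaims whatever the discarded depgraph still held.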
14785         for d in trees.itervalues():
14786                 d["porttree"].dbapi.melt()
14787                 d["porttree"].dbapi._aux_cache.clear()
14788                 d["bintree"].dbapi._aux_cache.clear()
14789                 d["bintree"].dbapi._clear_cache()
14790                 d["vartree"].dbapi.linkmap._clear_cache()
14791         portage.dircache.clear()
14792         gc.collect()
14793
14794 def load_emerge_config(trees=None):
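        # Honor environment overrides when building the trees, e.g. running
        # "ROOT=/mnt/gentoo emerge ..." to target an alternate root, or
        # setting PORTAGE_CONFIGROOT to use an alternate configuration tree.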
14795         kwargs = {}
14796         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14797                 v = os.environ.get(envvar, None)
14798                 if v and v.strip():
14799                         kwargs[k] = v
14800         trees = portage.create_trees(trees=trees, **kwargs)
14801
14802         for root, root_trees in trees.iteritems():
14803                 settings = root_trees["vartree"].settings
14804                 setconfig = load_default_config(settings, root_trees)
14805                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14806
14807         settings = trees["/"]["vartree"].settings
14808
14809         for myroot in trees:
14810                 if myroot != "/":
14811                         settings = trees[myroot]["vartree"].settings
14812                         break
14813
14814         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14815         mtimedb = portage.MtimeDB(mtimedbfile)
14816         
14817         return settings, trees, mtimedb
14818
14819 def adjust_config(myopts, settings):
14820         """Make emerge specific adjustments to the config."""
14821
14822         # To enhance usability, make some vars case insensitive by forcing them to
14823         # lower case.
14824         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14825                 if myvar in settings:
14826                         settings[myvar] = settings[myvar].lower()
14827                         settings.backup_changes(myvar)
14828         del myvar
14829
14830         # Kill noauto as it will break merges otherwise.
14831         if "noauto" in settings.features:
14832                 while "noauto" in settings.features:
14833                         settings.features.remove("noauto")
14834                 settings["FEATURES"] = " ".join(settings.features)
14835                 settings.backup_changes("FEATURES")
14836
14837         CLEAN_DELAY = 5
14838         try:
14839                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14840         except ValueError, e:
14841                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14842                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14843                         settings["CLEAN_DELAY"], noiselevel=-1)
14844         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14845         settings.backup_changes("CLEAN_DELAY")
14846
14847         EMERGE_WARNING_DELAY = 10
14848         try:
14849                 EMERGE_WARNING_DELAY = int(settings.get(
14850                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14851         except ValueError, e:
14852                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14853                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14854                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14855         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14856         settings.backup_changes("EMERGE_WARNING_DELAY")
14857
14858         if "--quiet" in myopts:
14859                 settings["PORTAGE_QUIET"]="1"
14860                 settings.backup_changes("PORTAGE_QUIET")
14861
14862         if "--verbose" in myopts:
14863                 settings["PORTAGE_VERBOSE"] = "1"
14864                 settings.backup_changes("PORTAGE_VERBOSE")
14865
14866         # Set so that configs will be merged regardless of remembered status
14867         if ("--noconfmem" in myopts):
14868                 settings["NOCONFMEM"]="1"
14869                 settings.backup_changes("NOCONFMEM")
14870
14871         # Set various debug markers... They should be merged somehow.
14872         PORTAGE_DEBUG = 0
14873         try:
14874                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14875                 if PORTAGE_DEBUG not in (0, 1):
14876                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14877                                 PORTAGE_DEBUG, noiselevel=-1)
14878                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14879                                 noiselevel=-1)
14880                         PORTAGE_DEBUG = 0
14881         except ValueError, e:
14882                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14883                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14884                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14885                 del e
14886         if "--debug" in myopts:
14887                 PORTAGE_DEBUG = 1
14888         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14889         settings.backup_changes("PORTAGE_DEBUG")
14890
14891         if settings.get("NOCOLOR") not in ("yes","true"):
14892                 portage.output.havecolor = 1
14893
14894         # The explicit --color < y | n > option overrides the NOCOLOR
14895         # environment variable and stdout auto-detection.
14896         if "--color" in myopts:
14897                 if "y" == myopts["--color"]:
14898                         portage.output.havecolor = 1
14899                         settings["NOCOLOR"] = "false"
14900                 else:
14901                         portage.output.havecolor = 0
14902                         settings["NOCOLOR"] = "true"
14903                 settings.backup_changes("NOCOLOR")
14904         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14905                 portage.output.havecolor = 0
14906                 settings["NOCOLOR"] = "true"
14907                 settings.backup_changes("NOCOLOR")
14908
14909 def apply_priorities(settings):
14910         ionice(settings)
14911         nice(settings)
14912
14913 def nice(settings):
14914         try:
14915                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14916         except (OSError, ValueError), e:
14917                 out = portage.output.EOutput()
14918                 out.eerror("Failed to change nice value to '%s'" % \
14919                         settings["PORTAGE_NICENESS"])
14920                 out.eerror("%s\n" % str(e))
14921
14922 def ionice(settings):
14923
14924         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14925         if ionice_cmd:
14926                 ionice_cmd = shlex.split(ionice_cmd)
14927         if not ionice_cmd:
14928                 return
14929
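        # Illustrative make.conf value (hypothetical flags; see make.conf(5)):
        #   PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"
        # ${PID} is substituted below with the pid of the current process.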
14930         from portage.util import varexpand
14931         variables = {"PID" : str(os.getpid())}
14932         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14933
14934         try:
14935                 rval = portage.process.spawn(cmd, env=os.environ)
14936         except portage.exception.CommandNotFound:
14937                 # The ionice command is unavailable (e.g. the OS doesn't
14938                 # support it), so return silently.
14939                 return
14940
14941         if rval != os.EX_OK:
14942                 out = portage.output.EOutput()
14943                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14944                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14945
14946 def display_missing_pkg_set(root_config, set_name):
14947
14948         msg = []
14949         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14950                 "The following sets exist:") % \
14951                 colorize("INFORM", set_name))
14952         msg.append("")
14953
14954         for s in sorted(root_config.sets):
14955                 msg.append("    %s" % s)
14956         msg.append("")
14957
14958         writemsg_level("".join("%s\n" % l for l in msg),
14959                 level=logging.ERROR, noiselevel=-1)
14960
14961 def expand_set_arguments(myfiles, myaction, root_config):
14962         retval = os.EX_OK
14963         setconfig = root_config.setconfig
14964
14965         sets = setconfig.getSets()
14966
14967         # In order to know exactly which atoms/sets should be added to the
14968         # world file, the depgraph performs set expansion later. It will get
14969         # confused about where the atoms came from if it's not allowed to
14970         # expand them itself.
14971         do_not_expand = (None, )
14972         newargs = []
14973         for a in myfiles:
14974                 if a in ("system", "world"):
14975                         newargs.append(SETPREFIX+a)
14976                 else:
14977                         newargs.append(a)
14978         myfiles = newargs
14979         del newargs
14980         newargs = []
14981
14982         # separators for set arguments
14983         ARG_START = "{"
14984         ARG_END = "}"
14985
14986         # WARNING: all operators must be of equal length
14987         IS_OPERATOR = "/@"
14988         DIFF_OPERATOR = "-@"
14989         UNION_OPERATOR = "+@"
14990         
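        # The loop below handles per-set options embedded in a set argument.
        # For example (hypothetical set name, assuming SETPREFIX is "@"),
        # "@someset{key=value,flag}" updates the configuration of "someset"
        # with {"key": "value", "flag": "True"} and leaves "@someset" in
        # myfiles for normal expansion.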
14991         for i in range(0, len(myfiles)):
14992                 if myfiles[i].startswith(SETPREFIX):
14993                         start = 0
14994                         end = 0
14995                         x = myfiles[i][len(SETPREFIX):]
14996                         newset = ""
14997                         while x:
14998                                 start = x.find(ARG_START)
14999                                 end = x.find(ARG_END)
15000                                 if start > 0 and start < end:
15001                                         namepart = x[:start]
15002                                         argpart = x[start+1:end]
15003                                 
15004                                         # TODO: implement proper quoting
15005                                         args = argpart.split(",")
15006                                         options = {}
15007                                         for a in args:
15008                                                 if "=" in a:
15009                                                         k, v  = a.split("=", 1)
15010                                                         options[k] = v
15011                                                 else:
15012                                                         options[a] = "True"
15013                                         setconfig.update(namepart, options)
15014                                         newset += (x[:start-len(namepart)]+namepart)
15015                                         x = x[end+len(ARG_END):]
15016                                 else:
15017                                         newset += x
15018                                         x = ""
15019                         myfiles[i] = SETPREFIX+newset
15020                                 
15021         sets = setconfig.getSets()
15022
15023         # display errors that occurred while loading the SetConfig instance
15024         for e in setconfig.errors:
15025                 print colorize("BAD", "Error during set creation: %s" % e)
15026         
15027         # emerge relies on the existence of sets with names "world" and "system"
15028         required_sets = ("world", "system")
15029         missing_sets = []
15030
15031         for s in required_sets:
15032                 if s not in sets:
15033                         missing_sets.append(s)
15034         if missing_sets:
15035                 if len(missing_sets) > 2:
15036                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15037                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15038                 elif len(missing_sets) == 2:
15039                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15040                 else:
15041                         missing_sets_str = '"%s"' % missing_sets[-1]
15042                 msg = ["emerge: incomplete set configuration, " + \
15043                         "missing set(s): %s" % missing_sets_str]
15044                 if sets:
15045                         msg.append("        sets defined: %s" % ", ".join(sets))
15046                 msg.append("        This usually means that '%s'" % \
15047                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15048                 msg.append("        is missing or corrupt.")
15049                 for line in msg:
15050                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15051                 return (None, 1)
15052         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15053
15054         for a in myfiles:
15055                 if a.startswith(SETPREFIX):
15056                         # support simple set operations (intersection, difference and union)
15057                         # on the commandline. Expressions are evaluated strictly left-to-right
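                        # Illustrative expressions (assuming SETPREFIX is "@"):
                        #   @world-@system  ->  atoms in world but not in system
                        #   @world/@system  ->  atoms in both world and system
                        #   @world+@system  ->  union of the two sets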
15058                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15059                                 expression = a[len(SETPREFIX):]
15060                                 expr_sets = []
15061                                 expr_ops = []
15062                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15063                                         is_pos = expression.rfind(IS_OPERATOR)
15064                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15065                                         union_pos = expression.rfind(UNION_OPERATOR)
15066                                         op_pos = max(is_pos, diff_pos, union_pos)
15067                                         s1 = expression[:op_pos]
15068                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15069                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15070                                         if s2 not in sets:
15071                                                 display_missing_pkg_set(root_config, s2)
15072                                                 return (None, 1)
15073                                         expr_sets.insert(0, s2)
15074                                         expr_ops.insert(0, op)
15075                                         expression = s1
15076                                 if expression not in sets:
15077                                         display_missing_pkg_set(root_config, expression)
15078                                         return (None, 1)
15079                                 expr_sets.insert(0, expression)
15080                                 result = set(setconfig.getSetAtoms(expression))
15081                                 for i in range(0, len(expr_ops)):
15082                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15083                                         if expr_ops[i] == IS_OPERATOR:
15084                                                 result.intersection_update(s2)
15085                                         elif expr_ops[i] == DIFF_OPERATOR:
15086                                                 result.difference_update(s2)
15087                                         elif expr_ops[i] == UNION_OPERATOR:
15088                                                 result.update(s2)
15089                                         else:
15090                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15091                                 newargs.extend(result)
15092                         else:                   
15093                                 s = a[len(SETPREFIX):]
15094                                 if s not in sets:
15095                                         display_missing_pkg_set(root_config, s)
15096                                         return (None, 1)
15097                                 setconfig.active.append(s)
15098                                 try:
15099                                         set_atoms = setconfig.getSetAtoms(s)
15100                                 except portage.exception.PackageSetNotFound, e:
15101                                         writemsg_level(("emerge: the given set '%s' " + \
15102                                                 "contains a non-existent set named '%s'.\n") % \
15103                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15104                                         return (None, 1)
15105                                 if myaction in unmerge_actions and \
15106                                                 not sets[s].supportsOperation("unmerge"):
15107                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15108                                                 "not support unmerge operations\n")
15109                                         retval = 1
15110                                 elif not set_atoms:
15111                                         print "emerge: '%s' is an empty set" % s
15112                                 elif myaction not in do_not_expand:
15113                                         newargs.extend(set_atoms)
15114                                 else:
15115                                         newargs.append(SETPREFIX+s)
15116                                 for e in sets[s].errors:
15117                                         print e
15118                 else:
15119                         newargs.append(a)
15120         return (newargs, retval)
15121
15122 def repo_name_check(trees):
15123         missing_repo_names = set()
15124         for root, root_trees in trees.iteritems():
15125                 if "porttree" in root_trees:
15126                         portdb = root_trees["porttree"].dbapi
15127                         missing_repo_names.update(portdb.porttrees)
15128                         repos = portdb.getRepositories()
15129                         for r in repos:
15130                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15131                         if portdb.porttree_root in missing_repo_names and \
15132                                 not os.path.exists(os.path.join(
15133                                 portdb.porttree_root, "profiles")):
15134                                 # This is normal if $PORTDIR happens to be empty,
15135                                 # so don't warn about it.
15136                                 missing_repo_names.remove(portdb.porttree_root)
15137
15138         if missing_repo_names:
15139                 msg = []
15140                 msg.append("WARNING: One or more repositories " + \
15141                         "have missing repo_name entries:")
15142                 msg.append("")
15143                 for p in missing_repo_names:
15144                         msg.append("\t%s/profiles/repo_name" % (p,))
15145                 msg.append("")
15146                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15147                         "should be a plain text file containing a unique " + \
15148                         "name for the repository on the first line.", 70))
15149                 writemsg_level("".join("%s\n" % l for l in msg),
15150                         level=logging.WARNING, noiselevel=-1)
15151
15152         return bool(missing_repo_names)
15153
15154 def config_protect_check(trees):
15155         for root, root_trees in trees.iteritems():
15156                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15157                         msg = "!!! CONFIG_PROTECT is empty"
15158                         if root != "/":
15159                                 msg += " for '%s'" % root
15160                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
15161
15162 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15163
15164         if "--quiet" in myopts:
15165                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15166                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15167                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15168                         print "    " + colorize("INFORM", cp)
15169                 return
15170
15171         s = search(root_config, spinner, "--searchdesc" in myopts,
15172                 "--quiet" not in myopts, "--usepkg" in myopts,
15173                 "--usepkgonly" in myopts)
15174         null_cp = portage.dep_getkey(insert_category_into_atom(
15175                 arg, "null"))
15176         cat, atom_pn = portage.catsplit(null_cp)
15177         s.searchkey = atom_pn
15178         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15179                 s.addCP(cp)
15180         s.output()
15181         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15182         print "!!! one of the above fully-qualified ebuild names instead.\n"
15183
15184 def profile_check(trees, myaction, myopts):
15185         if myaction in ("info", "sync"):
15186                 return os.EX_OK
15187         elif "--version" in myopts or "--help" in myopts:
15188                 return os.EX_OK
15189         for root, root_trees in trees.iteritems():
15190                 if root_trees["root_config"].settings.profiles:
15191                         continue
15192                 # generate some profile related warning messages
15193                 validate_ebuild_environment(trees)
15194                 msg = "If you have just changed your profile configuration, you " + \
15195                         "should revert back to the previous configuration. Due to " + \
15196                         "your current profile being invalid, allowed actions are " + \
15197                         "limited to --help, --info, --sync, and --version."
15198                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15199                         level=logging.ERROR, noiselevel=-1)
15200                 return 1
15201         return os.EX_OK
15202
15203 def emerge_main():
15204         global portage  # NFC why this is necessary now - genone
15205         portage._disable_legacy_globals()
15206         # Disable color until we're sure that it should be enabled (after
15207         # EMERGE_DEFAULT_OPTS has been parsed).
15208         portage.output.havecolor = 0
15209         # This first pass is just for options that need to be known as early as
15210         # possible, such as --config-root.  They will be parsed again later,
15211         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15212         # the value of --config-root).
15213         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15214         if "--debug" in myopts:
15215                 os.environ["PORTAGE_DEBUG"] = "1"
15216         if "--config-root" in myopts:
15217                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15218
15219         # Portage needs to ensure a sane umask for the files it creates.
15220         os.umask(022)
15221         settings, trees, mtimedb = load_emerge_config()
15222         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15223         rval = profile_check(trees, myaction, myopts)
15224         if rval != os.EX_OK:
15225                 return rval
15226
15227         if portage._global_updates(trees, mtimedb["updates"]):
15228                 mtimedb.commit()
15229                 # Reload the whole config from scratch.
15230                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15231                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15232
15233         xterm_titles = "notitles" not in settings.features
15234
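        # Build the effective command line: EMERGE_DEFAULT_OPTS from make.conf
        # (an illustrative value would be EMERGE_DEFAULT_OPTS="--with-bdeps=y")
        # is prepended unless --ignore-default-opts was given.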
15235         tmpcmdline = []
15236         if "--ignore-default-opts" not in myopts:
15237                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15238         tmpcmdline.extend(sys.argv[1:])
15239         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15240
15241         if "--digest" in myopts:
15242                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15243                 # Reload the whole config from scratch so that the portdbapi internal
15244                 # config is updated with new FEATURES.
15245                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15246                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15247
15248         for myroot in trees:
15249                 mysettings = trees[myroot]["vartree"].settings
15250                 mysettings.unlock()
15251                 adjust_config(myopts, mysettings)
15252                 if "--pretend" not in myopts:
15253                         mysettings["PORTAGE_COUNTER_HASH"] = \
15254                                 trees[myroot]["vartree"].dbapi._counter_hash()
15255                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15256                 mysettings.lock()
15257                 del myroot, mysettings
15258
15259         apply_priorities(settings)
15260
15261         spinner = stdout_spinner()
15262         if "candy" in settings.features:
15263                 spinner.update = spinner.update_scroll
15264
15265         if "--quiet" not in myopts:
15266                 portage.deprecated_profile_check(settings=settings)
15267                 repo_name_check(trees)
15268                 config_protect_check(trees)
15269
15270         eclasses_overridden = {}
15271         for mytrees in trees.itervalues():
15272                 mydb = mytrees["porttree"].dbapi
15273                 # Freeze the portdbapi for performance (memoize all xmatch results).
15274                 mydb.freeze()
15275                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15276         del mytrees, mydb
15277
15278         if eclasses_overridden and \
15279                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15280                 prefix = bad(" * ")
15281                 if len(eclasses_overridden) == 1:
15282                         writemsg(prefix + "Overlay eclass overrides " + \
15283                                 "eclass from PORTDIR:\n", noiselevel=-1)
15284                 else:
15285                         writemsg(prefix + "Overlay eclasses override " + \
15286                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15287                 writemsg(prefix + "\n", noiselevel=-1)
15288                 for eclass_name in sorted(eclasses_overridden):
15289                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15290                                 (eclasses_overridden[eclass_name], eclass_name),
15291                                 noiselevel=-1)
15292                 writemsg(prefix + "\n", noiselevel=-1)
15293                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15294                 "because it will trigger invalidation of cached ebuild metadata " + \
15295                 "that is distributed with the portage tree. If you must " + \
15296                 "override eclasses from PORTDIR then you are advised to add " + \
15297                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15298                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15299                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15300                 "you would like to disable this warning."
15301                 from textwrap import wrap
15302                 for line in wrap(msg, 72):
15303                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15304
15305         if "moo" in myfiles:
15306                 print """
15307
15308   Larry loves Gentoo (""" + platform.system() + """)
15309
15310  _______________________
15311 < Have you mooed today? >
15312  -----------------------
15313         \   ^__^
15314          \  (oo)\_______
15315             (__)\       )\/\ 
15316                 ||----w |
15317                 ||     ||
15318
15319 """
15320
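              # Warn if any argument is a path to an ebuild or binary package,
              # since emerging by path is only partially supported.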
15321         for x in myfiles:
15322                 ext = os.path.splitext(x)[1]
15323                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15324                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15325                         break
15326
15327         root_config = trees[settings["ROOT"]]["root_config"]
15328         if myaction == "list-sets":
15329                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15330                 sys.stdout.flush()
15331                 return os.EX_OK
15332
15333         # only expand sets for actions taking package arguments
15334         oldargs = myfiles[:]
15335         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15336                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15337                 if retval != os.EX_OK:
15338                         return retval
15339
15340                 # Handle empty sets specially; otherwise emerge would fall back to
15341                 # the help message shown for empty argument lists.
15342                 if oldargs and not myfiles:
15343                         print "emerge: no targets left after set expansion"
15344                         return 0
15345
15346         if ("--tree" in myopts) and ("--columns" in myopts):
15347                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15348                 return 1
15349
15350         if ("--quiet" in myopts):
15351                 spinner.update = spinner.update_quiet
15352                 portage.util.noiselimit = -1
15353
15354         # Always create packages if FEATURES=buildpkg
15355         # Imply --buildpkg if --buildpkgonly
15356         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15357                 if "--buildpkg" not in myopts:
15358                         myopts["--buildpkg"] = True
15359
15360         # Also allow -S to invoke search action (-sS)
15361         if ("--searchdesc" in myopts):
15362                 if myaction and myaction != "search":
15363                         myfiles.append(myaction)
15364                 if "--search" not in myopts:
15365                         myopts["--search"] = True
15366                 myaction = "search"
15367
15368         # Always try to fetch binary packages if FEATURES=getbinpkg
15369         if ("getbinpkg" in settings.features):
15370                 myopts["--getbinpkg"] = True
15371
15372         if "--buildpkgonly" in myopts:
15373                 # --buildpkgonly will not merge anything, so
15374                 # it cancels all binary package options.
15375                 for opt in ("--getbinpkg", "--getbinpkgonly",
15376                         "--usepkg", "--usepkgonly"):
15377                         myopts.pop(opt, None)
15378
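              # --fetch-all-uri only downloads, so it implies --fetchonly.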
15379         if "--fetch-all-uri" in myopts:
15380                 myopts["--fetchonly"] = True
15381
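              # --skipfirst only makes sense when resuming, so imply --resume.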
15382         if "--skipfirst" in myopts and "--resume" not in myopts:
15383                 myopts["--resume"] = True
15384
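              # Propagate binary package option implications:
              # --getbinpkgonly -> --usepkgonly and --getbinpkg,
              # --getbinpkg or --usepkgonly -> --usepkg.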
15385         if ("--getbinpkgonly" in myopts) and ("--usepkgonly" not in myopts):
15386                 myopts["--usepkgonly"] = True
15387
15388         if ("--getbinpkgonly" in myopts) and ("--getbinpkg" not in myopts):
15389                 myopts["--getbinpkg"] = True
15390
15391         if ("--getbinpkg" in myopts) and ("--usepkg" not in myopts):
15392                 myopts["--usepkg"] = True
15393
15394         # Also allow -K to apply --usepkg/-k
15395         if ("--usepkgonly" in myopts) and ("--usepkg" not in myopts):
15396                 myopts["--usepkg"] = True
15397
15398         # Allow -p to remove --ask
15399         if ("--pretend" in myopts) and ("--ask" in myopts):
15400                 print ">>> --pretend disables --ask... removing --ask from options."
15401                 del myopts["--ask"]
15402
15403         # forbid --ask when not in a terminal
15404         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15405         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15406                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15407                         noiselevel=-1)
15408                 return 1
15409
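              # PORTAGE_DEBUG="1" enables debug output; combined with
              # FEATURES="python-trace" it also enables full Python tracing.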
15410         if settings.get("PORTAGE_DEBUG", "") == "1":
15411                 spinner.update = spinner.update_quiet
15412                 portage.debug=1
15413                 if "python-trace" in settings.features:
15414                         import portage.debug
15415                         portage.debug.set_trace(True)
15416
15417         if "--quiet" not in myopts:
15418                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15419                         spinner.update = spinner.update_basic
15420
15421         if "--version" in myopts:
15422                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15423                         settings.profile_path, settings["CHOST"],
15424                         trees[settings["ROOT"]]["vartree"].dbapi)
15425                 return 0
15426         elif "--help" in myopts:
15427                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15428                 return 0
15429
15430         if "--debug" in myopts:
15431                 print "myaction", myaction
15432                 print "myopts", myopts
15433
15434         if not myaction and not myfiles and "--resume" not in myopts:
15435                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15436                 return 1
15437
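              # Cache a few frequently tested options as booleans for the
              # privilege checks and action handling below.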
15438         pretend = "--pretend" in myopts
15439         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15440         buildpkgonly = "--buildpkgonly" in myopts
15441
15442         # Check for sufficient privileges (root or portage group) for actions that require them.
15443         if portage.secpass < 2:
15444                 # We've already allowed "--version" and "--help" above.
15445                 if "--pretend" not in myopts and myaction not in ("search","info"):
15446                         need_superuser = not \
15447                                 (fetchonly or \
15448                                 (buildpkgonly and secpass >= 1) or \
15449                                 myaction in ("metadata", "regen") or \
15450                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15451                         if portage.secpass < 1 or \
15452                                 need_superuser:
15453                                 if need_superuser:
15454                                         access_desc = "superuser"
15455                                 else:
15456                                         access_desc = "portage group"
15457                                 # Always show portage_group_warning() when only portage group
15458                                 # access is required but the user is not in the portage group.
15459                                 from portage.data import portage_group_warning
15460                                 if "--ask" in myopts:
15461                                         myopts["--pretend"] = True
15462                                         del myopts["--ask"]
15463                                         print ("%s access is required... " + \
15464                                                 "adding --pretend to options.\n") % access_desc
15465                                         if portage.secpass < 1 and not need_superuser:
15466                                                 portage_group_warning()
15467                                 else:
15468                                         sys.stderr.write(("emerge: %s access is " + \
15469                                                 "required.\n\n") % access_desc)
15470                                         if portage.secpass < 1 and not need_superuser:
15471                                                 portage_group_warning()
15472                                         return 1
15473
15474         disable_emergelog = False
15475         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15476                 if x in myopts:
15477                         disable_emergelog = True
15478                         break
15479         if myaction in ("search", "info"):
15480                 disable_emergelog = True
15481         if disable_emergelog:
15482                 # Disable emergelog for everything except build or unmerge
15483                 # operations.  This helps minimize parallel emerge.log entries that can
15484                 # confuse log parsers.  We especially want it disabled during
15485                 # parallel-fetch, which uses --resume --fetchonly.
15486                 global emergelog
15487                 def emergelog(*pargs, **kargs):
15488                         pass
15489
15490         if "--pretend" not in myopts:
15491                 emergelog(xterm_titles, "Started emerge on: "+\
15492                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15493                 myelogstr=""
15494                 if myopts:
15495                         myelogstr=" ".join(myopts)
15496                 if myaction:
15497                         myelogstr+=" "+myaction
15498                 if myfiles:
15499                         myelogstr += " " + " ".join(oldargs)
15500                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15501         del oldargs
15502
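              # Turn SIGINT/SIGTERM into a clean exit with status 100 + signum,
              # ignoring further signals while shutting down.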
15503         def emergeexitsig(signum, frame):
15504                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15505                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15506                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15507                 sys.exit(100+signum)
15508         signal.signal(signal.SIGINT, emergeexitsig)
15509         signal.signal(signal.SIGTERM, emergeexitsig)
15510
15511         def emergeexit():
15512                 """This gets our final log message in before we quit."""
15513                 if "--pretend" not in myopts:
15514                         emergelog(xterm_titles, " *** terminating.")
15515                 if "notitles" not in settings.features:
15516                         xtermTitleReset()
15517         portage.atexit_register(emergeexit)
15518
15519         if myaction in ("config", "metadata", "regen", "sync"):
15520                 if "--pretend" in myopts:
15521                         sys.stderr.write(("emerge: The '%s' action does " + \
15522                                 "not support '--pretend'.\n") % myaction)
15523                         return 1
15524
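              # Dispatch the requested action.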
15525         if "sync" == myaction:
15526                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15527         elif "metadata" == myaction:
15528                 action_metadata(settings, portdb, myopts)
15529         elif myaction=="regen":
15530                 validate_ebuild_environment(trees)
15531                 return action_regen(settings, portdb, myopts.get("--jobs"),
15532                         myopts.get("--load-average"))
15533         # CONFIG action
15534         elif "config"==myaction:
15535                 validate_ebuild_environment(trees)
15536                 action_config(settings, trees, myopts, myfiles)
15537
15538         # SEARCH action
15539         elif "search"==myaction:
15540                 validate_ebuild_environment(trees)
15541                 action_search(trees[settings["ROOT"]]["root_config"],
15542                         myopts, myfiles, spinner)
15543         elif myaction in ("clean", "unmerge") or \
15544                 (myaction == "prune" and "--nodeps" in myopts):
15545                 validate_ebuild_environment(trees)
15546
15547                 # Ensure atoms are valid before calling unmerge().
15548                 # For backward compat, leading '=' is not required.
15549                 for x in myfiles:
15550                         if is_valid_package_atom(x) or \
15551                                 is_valid_package_atom("=" + x):
15552                                 continue
15553                         msg = []
15554                         msg.append("'%s' is not a valid package atom." % (x,))
15555                         msg.append("Please check ebuild(5) for full details.")
15556                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15557                                 level=logging.ERROR, noiselevel=-1)
15558                         return 1
15559
15560                 # When given a list of atoms, unmerge
15561                 # them in the order given.
15562                 ordered = myaction == "unmerge"
15563                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15564                         mtimedb["ldpath"], ordered=ordered):
15565                         if not (buildpkgonly or fetchonly or pretend):
15566                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15567
15568         elif myaction in ("depclean", "info", "prune"):
15569
15570                 # Ensure atoms are valid and unambiguous before passing them to the action.
15571                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15572                 valid_atoms = []
15573                 for x in myfiles:
15574                         if is_valid_package_atom(x):
15575                                 try:
15576                                         valid_atoms.append(
15577                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15578                                 except portage.exception.AmbiguousPackageName, e:
15579                                         msg = "The short ebuild name \"" + x + \
15580                                                 "\" is ambiguous.  Please specify " + \
15581                                                 "one of the following " + \
15582                                                 "fully-qualified ebuild names instead:"
15583                                         for line in textwrap.wrap(msg, 70):
15584                                                 writemsg_level("!!! %s\n" % (line,),
15585                                                         level=logging.ERROR, noiselevel=-1)
15586                                         for i in e[0]:
15587                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15588                                                         level=logging.ERROR, noiselevel=-1)
15589                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15590                                         return 1
15591                                 continue
15592                         msg = []
15593                         msg.append("'%s' is not a valid package atom." % (x,))
15594                         msg.append("Please check ebuild(5) for full details.")
15595                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15596                                 level=logging.ERROR, noiselevel=-1)
15597                         return 1
15598
15599                 if myaction == "info":
15600                         return action_info(settings, trees, myopts, valid_atoms)
15601
15602                 validate_ebuild_environment(trees)
15603                 action_depclean(settings, trees, mtimedb["ldpath"],
15604                         myopts, myaction, valid_atoms, spinner)
15605                 if not (buildpkgonly or fetchonly or pretend):
15606                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15607         # "update", "system", or just process files:
15608         else:
15609                 validate_ebuild_environment(trees)
15610                 if "--pretend" not in myopts:
15611                         display_news_notification(root_config, myopts)
15612                 retval = action_build(settings, trees, mtimedb,
15613                         myopts, myaction, myfiles, spinner)
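                      # Re-fetch root_config in case the configuration was
                      # reloaded while the build action ran.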
15614                 root_config = trees[settings["ROOT"]]["root_config"]
15615                 post_emerge(root_config, myopts, mtimedb, retval)
15616
15617                 return retval