Bug #261992 - Inside Binpkg._prefetch_exit(), avoid potential issues with
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing output to the tty too frequently wastes cpu time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
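# Illustrative usage sketch (not part of the original file): callers keep one
# spinner instance and call update() inside long-running loops, as
# search.execute() does further below.  The animation can be changed by
# rebinding the update attribute to one of the update_* methods.
#
#     spinner = stdout_spinner()
#     spinner.update = spinner.update_scroll  # or update_basic / update_quiet
#     for _ in xrange(1000):
#             spinner.update()
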
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for user input,
147         which is checked against the responses; the first matching response is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
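# Illustrative usage sketch (not part of the original file); the prompt text
# and responses below are hypothetical:
#
#     if userquery("Do you want to continue?") == "No":
#             sys.exit(1)
#
#     choice = userquery("Which action?", responses=["Merge", "Skip", "Abort"])
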
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge", "version",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
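# Illustrative usage sketch (not part of the original file); the package shown
# is hypothetical.  The message is appended with a timestamp to
# /var/log/emerge.log, and short_msg optionally updates the xterm title:
#
#     emergelog(xterm_titles, ">>> emerge (1 of 3) app-misc/foo-1.0 to /",
#             short_msg="emerge: (1 of 3) app-misc/foo-1.0")
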
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
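# Illustrative values (not part of the original file), assuming the kB
# rounding and comma grouping implemented above:
#
#     format_size(1)       -> "1 kB"       (rounded up to a whole kB)
#     format_size(1500)    -> "2 kB"
#     format_size(2048000) -> "2,000 kB"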
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
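# Illustrative result (not part of the original file); both the CHOST and the
# version number are hypothetical.  The lookups above are tried in order, so
# the gcc-config name wins when it is available:
#
#     getgccversion("x86_64-pc-linux-gnu")  -> "gcc-4.3.2"
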
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of if it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
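# Illustrative results (not part of the original file) for a few option
# combinations, derived from the parameter rules above:
#
#     create_depgraph_params({}, "")         -> set(["recurse"])
#     create_depgraph_params({"--update": True, "--deep": True}, "")
#                                            -> set(["recurse", "selective", "deep"])
#     create_depgraph_params({}, "remove")   -> set(["recurse", "remove", "complete"])
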
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virtual matches are unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtual
495                 expansion can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
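# Illustrative usage sketch (not part of the original file); the search key is
# hypothetical.  As handled in execute() above, a leading "%" makes the key a
# regular expression and a leading "@" matches against the full
# category/package name:
#
#     mysearch = search(root_config, spinner, searchdesc=True,
#             verbose=True, usepkg=True, usepkgonly=False)
#     mysearch.execute("%^python$")
#     mysearch.output()
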
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
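# Illustrative outcomes (not part of the original file); the atoms are
# hypothetical.  Per the docstring above, a slot atom is stored when the
# package is slotted and the argument atom is precise enough, the plain
# ${CATEGORY}/${PN} is stored otherwise, and None means nothing needs to be
# added to the world file:
#
#     create_world_atom(pkg, args_set, root_config)
#             -> "dev-lang/python:2.5"   (slotted, precise argument atom)
#             -> "app-misc/foo"          (unslotted package)
#             -> None                    (already in world, or covered by system)
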
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
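# Illustrative values (not part of the original file); the flag names are
# arbitrary examples of IUSE defaults being stripped:
#
#     list(filter_iuse_defaults(["+berkdb", "-gtk", "ncurses"]))
#             -> ["berkdb", "gtk", "ncurses"]
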
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
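# Illustrative sketch (not part of the original file): subclasses declare
# __slots__ and receive the corresponding values as keyword arguments, as
# AbstractDepPriority does below.  The class and attribute names here are
# hypothetical.
#
#     class _ExampleNode(SlotObject):
#             __slots__ = ("name", "priority")
#
#     node = _ExampleNode(name="foo", priority=1)
#     clone = node.copy()  # copies every __slots__ attribute to a new instance
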
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
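# Illustrative values (not part of the original file), matching the table in
# the docstring above:
#
#     str(UnmergeDepPriority(runtime=True))       -> "hard"   (int value  0)
#     str(UnmergeDepPriority(runtime_post=True))  -> "hard"   (int value -1)
#     str(UnmergeDepPriority(buildtime=True))     -> "soft"   (int value -2)
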
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
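
# Illustrative sketch (not part of the original file): these predicates are
# meant to be passed as the ignore_priority argument of digraph methods such
# as child_nodes(), as _find_deep_system_runtime_deps() does further below.
# Higher entries in the ignore_priority tuple ignore progressively harder
# dependency edges:
#
#     # DepPriority edges other than buildtime are ignored here
#     children = graph.child_nodes(node,
#             ignore_priority=DepPriorityNormalRange.ignore_medium)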
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
1158 class FakeVartree(portage.vartree):
1159         """This implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do its
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Initialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
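# Illustrative sketch (not part of emerge itself): how a depgraph-style
# consumer might use FakeVartree. The root_config argument is assumed to
# come from an already populated trees dictionary, as elsewhere in this
# module.
def _example_fake_vartree(root_config):
        # Snapshot the installed-package metadata; the real vardb is only
        # locked inside the constructor.
        fake_vartree = FakeVartree(root_config)
        # Dependency calculations can read the in-memory dbapi without
        # holding a lock on the real vardb.
        installed = list(fake_vartree.dbapi)
        # After packages have been merged or unmerged, resynchronize the
        # in-memory copy with the real vardb.
        fake_vartree.sync()
        return installed
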
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
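# Illustrative sketch (assumption, not part of emerge): applying global
# package-move updates from the tree's profiles/updates directory to one
# installed package's dependency metadata, mirroring the fallback path in
# FakeVartree._aux_get_wrapper() above.
def _example_apply_updates(portdb, vardb, cpv):
        upd_commands = grab_global_updates(portdb.porttree_root)
        perform_global_updates(cpv, vardb, upd_commands)
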
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. An InvalidDependString exception
1383         raised for an invalid LICENSE is caught and treated as not visible.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if not pkg.installed:
1391                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if not pkg.installed:
1419                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
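# Illustrative sketch (not part of emerge itself): splitting Package
# instances into visible and masked groups for the given configuration,
# using visible() and get_masking_status() above.
def _example_visibility_report(pkgsettings, root_config, pkgs):
        shown = []
        for pkg in pkgs:
                if visible(pkgsettings, pkg):
                        shown.append(pkg)
                else:
                        mreasons = get_masking_status(pkg, pkgsettings, root_config)
                        writemsg("masked: %s (%s)\n" % (pkg.cpv, ", ".join(mreasons)))
        return shown
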
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440         if metadata is None:
1441                 mreasons = ["corruption"]
1442         else:
1443                 pkg = Package(type_name=pkg_type, root_config=root_config,
1444                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1445                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446         return metadata, mreasons
1447
1448 def show_masked_packages(masked_packages):
1449         shown_licenses = set()
1450         shown_comments = set()
1451         # A given cpv may have both an ebuild and a binary package.
1452         # Only show one of them to avoid redundant output.
1453         shown_cpvs = set()
1454         have_eapi_mask = False
1455         for (root_config, pkgsettings, cpv,
1456                 metadata, mreasons) in masked_packages:
1457                 if cpv in shown_cpvs:
1458                         continue
1459                 shown_cpvs.add(cpv)
1460                 comment, filename = None, None
1461                 if "package.mask" in mreasons:
1462                         comment, filename = \
1463                                 portage.getmaskingreason(
1464                                 cpv, metadata=metadata,
1465                                 settings=pkgsettings,
1466                                 portdb=root_config.trees["porttree"].dbapi,
1467                                 return_location=True)
1468                 missing_licenses = []
1469                 if metadata:
1470                         if not portage.eapi_is_supported(metadata["EAPI"]):
1471                                 have_eapi_mask = True
1472                         try:
1473                                 missing_licenses = \
1474                                         pkgsettings._getMissingLicenses(
1475                                                 cpv, metadata)
1476                         except portage.exception.InvalidDependString:
1477                                 # This will have already been reported
1478                                 # above via mreasons.
1479                                 pass
1480
1481                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482                 if comment and comment not in shown_comments:
1483                         print filename+":"
1484                         print comment
1485                         shown_comments.add(comment)
1486                 portdb = root_config.trees["porttree"].dbapi
1487                 for l in missing_licenses:
1488                         l_path = portdb.findLicensePath(l)
1489                         if l in shown_licenses:
1490                                 continue
1491                         msg = ("A copy of the '%s' license" + \
1492                         " is located at '%s'.") % (l, l_path)
1493                         print msg
1494                         print
1495                         shown_licenses.add(l)
1496         return have_eapi_mask
1497
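# Illustrative sketch (assumption): building the masked_packages list that
# show_masked_packages() consumes, with one (root_config, pkgsettings,
# cpv, metadata, mreasons) tuple per package, using get_mask_info() above.
def _example_report_masked(root_config, pkgsettings, portdb, cpvs, db_keys):
        masked_packages = []
        for cpv in cpvs:
                metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings,
                        portdb, "ebuild", False, False, db_keys)
                masked_packages.append(
                        (root_config, pkgsettings, cpv, metadata, mreasons))
        if show_masked_packages(masked_packages):
                print "One or more packages are masked because of an unsupported EAPI."
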
1498 class Task(SlotObject):
1499         __slots__ = ("_hash_key", "_hash_value")
1500
1501         def _get_hash_key(self):
1502                 hash_key = getattr(self, "_hash_key", None)
1503                 if hash_key is None:
1504                         raise NotImplementedError(self)
1505                 return hash_key
1506
1507         def __eq__(self, other):
1508                 return self._get_hash_key() == other
1509
1510         def __ne__(self, other):
1511                 return self._get_hash_key() != other
1512
1513         def __hash__(self):
1514                 hash_value = getattr(self, "_hash_value", None)
1515                 if hash_value is None:
1516                         self._hash_value = hash(self._get_hash_key())
1517                 return self._hash_value
1518
1519         def __len__(self):
1520                 return len(self._get_hash_key())
1521
1522         def __getitem__(self, key):
1523                 return self._get_hash_key()[key]
1524
1525         def __iter__(self):
1526                 return iter(self._get_hash_key())
1527
1528         def __contains__(self, key):
1529                 return key in self._get_hash_key()
1530
1531         def __str__(self):
1532                 return str(self._get_hash_key())
1533
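# Illustrative sketch (hypothetical subclass, not part of emerge): a Task's
# identity is defined entirely by _get_hash_key(), so instances compare
# equal to the raw hash-key tuple and can be used interchangeably with it
# as dictionary keys. SlotObject is assumed to assign constructor keyword
# arguments to the declared slots, as the subclasses below rely on.
class _ExampleTask(Task):
        __slots__ = ("name",)
        def _get_hash_key(self):
                hash_key = getattr(self, "_hash_key", None)
                if hash_key is None:
                        self._hash_key = ("example", self.name)
                return self._hash_key
# Usage sketch:
#   task = _ExampleTask(name="foo")
#   task == ("example", "foo")          # True
#   {task: 1}[("example", "foo")]       # 1
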
1534 class Blocker(Task):
1535
1536         __hash__ = Task.__hash__
1537         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1538
1539         def __init__(self, **kwargs):
1540                 Task.__init__(self, **kwargs)
1541                 self.cp = portage.dep_getkey(self.atom)
1542
1543         def _get_hash_key(self):
1544                 hash_key = getattr(self, "_hash_key", None)
1545                 if hash_key is None:
1546                         self._hash_key = \
1547                                 ("blocks", self.root, self.atom, self.eapi)
1548                 return self._hash_key
1549
1550 class Package(Task):
1551
1552         __hash__ = Task.__hash__
1553         __slots__ = ("built", "cpv", "depth",
1554                 "installed", "metadata", "onlydeps", "operation",
1555                 "root_config", "type_name",
1556                 "category", "counter", "cp", "cpv_split",
1557                 "inherited", "iuse", "mtime",
1558                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1559
1560         metadata_keys = [
1561                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562                 "INHERITED", "IUSE", "KEYWORDS",
1563                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565
1566         def __init__(self, **kwargs):
1567                 Task.__init__(self, **kwargs)
1568                 self.root = self.root_config.root
1569                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570                 self.cp = portage.cpv_getkey(self.cpv)
1571                 slot = self.slot
1572                 if not slot:
1573                         # Avoid an InvalidAtom exception when creating slot_atom.
1574                         # This package instance will be masked due to empty SLOT.
1575                         slot = '0'
1576                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1577                 self.category, self.pf = portage.catsplit(self.cpv)
1578                 self.cpv_split = portage.catpkgsplit(self.cpv)
1579                 self.pv_split = self.cpv_split[1:]
1580
1581         class _use(object):
1582
1583                 __slots__ = ("__weakref__", "enabled")
1584
1585                 def __init__(self, use):
1586                         self.enabled = frozenset(use)
1587
1588         class _iuse(object):
1589
1590                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1591
1592                 def __init__(self, tokens, iuse_implicit):
1593                         self.tokens = tuple(tokens)
1594                         self.iuse_implicit = iuse_implicit
1595                         enabled = []
1596                         disabled = []
1597                         other = []
1598                         for x in tokens:
1599                                 prefix = x[:1]
1600                                 if prefix == "+":
1601                                         enabled.append(x[1:])
1602                                 elif prefix == "-":
1603                                         disabled.append(x[1:])
1604                                 else:
1605                                         other.append(x)
1606                         self.enabled = frozenset(enabled)
1607                         self.disabled = frozenset(disabled)
1608                         self.all = frozenset(chain(enabled, disabled, other))
1609
1610                 def __getattribute__(self, name):
1611                         if name == "regex":
1612                                 try:
1613                                         return object.__getattribute__(self, "regex")
1614                                 except AttributeError:
1615                                         all = object.__getattribute__(self, "all")
1616                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1617                                         # Escape anything except ".*" which is supposed
1618                                         # to pass through from _get_implicit_iuse()
1619                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1620                                         regex = "^(%s)$" % "|".join(regex)
1621                                         regex = regex.replace("\\.\\*", ".*")
1622                                         self.regex = re.compile(regex)
1623                         return object.__getattribute__(self, name)
1624
1625         def _get_hash_key(self):
1626                 hash_key = getattr(self, "_hash_key", None)
1627                 if hash_key is None:
1628                         if self.operation is None:
1629                                 self.operation = "merge"
1630                                 if self.onlydeps or self.installed:
1631                                         self.operation = "nomerge"
1632                         self._hash_key = \
1633                                 (self.type_name, self.root, self.cpv, self.operation)
1634                 return self._hash_key
1635
1636         def __lt__(self, other):
1637                 if other.cp != self.cp:
1638                         return False
1639                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1640                         return True
1641                 return False
1642
1643         def __le__(self, other):
1644                 if other.cp != self.cp:
1645                         return False
1646                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1647                         return True
1648                 return False
1649
1650         def __gt__(self, other):
1651                 if other.cp != self.cp:
1652                         return False
1653                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1654                         return True
1655                 return False
1656
1657         def __ge__(self, other):
1658                 if other.cp != self.cp:
1659                         return False
1660                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1661                         return True
1662                 return False
1663
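# Illustrative sketch (not part of emerge): the rich comparison operators
# above order Package instances by version via portage.pkgcmp(), but only
# within the same category/package name; comparisons across different
# names always return False, so version ordering is most useful among
# packages sharing a slot atom.
def _example_newest_per_slot(pkgs):
        best = {}
        for pkg in pkgs:
                other = best.get(pkg.slot_atom)
                if other is None or pkg > other:
                        best[pkg.slot_atom] = pkg
        return best
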
1664 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1665         if not x.startswith("UNUSED_"))
1666 _all_metadata_keys.discard("CDEPEND")
1667 _all_metadata_keys.update(Package.metadata_keys)
1668
1669 from portage.cache.mappings import slot_dict_class
1670 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1671
1672 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1673         """
1674         Detect metadata updates and synchronize Package attributes.
1675         """
1676
1677         __slots__ = ("_pkg",)
1678         _wrapped_keys = frozenset(
1679                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1680
1681         def __init__(self, pkg, metadata):
1682                 _PackageMetadataWrapperBase.__init__(self)
1683                 self._pkg = pkg
1684                 self.update(metadata)
1685
1686         def __setitem__(self, k, v):
1687                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1688                 if k in self._wrapped_keys:
1689                         getattr(self, "_set_" + k.lower())(k, v)
1690
1691         def _set_inherited(self, k, v):
1692                 if isinstance(v, basestring):
1693                         v = frozenset(v.split())
1694                 self._pkg.inherited = v
1695
1696         def _set_iuse(self, k, v):
1697                 self._pkg.iuse = self._pkg._iuse(
1698                         v.split(), self._pkg.root_config.iuse_implicit)
1699
1700         def _set_slot(self, k, v):
1701                 self._pkg.slot = v
1702
1703         def _set_use(self, k, v):
1704                 self._pkg.use = self._pkg._use(v.split())
1705
1706         def _set_counter(self, k, v):
1707                 if isinstance(v, basestring):
1708                         try:
1709                                 v = long(v.strip())
1710                         except ValueError:
1711                                 v = 0
1712                 self._pkg.counter = v
1713
1714         def _set__mtime_(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.mtime = v
1721
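# Illustrative sketch (assumption): assignments to the wrapped metadata
# keys above are mirrored onto the owning Package, which is how
# aux_update() calls made by FakeVartree keep attributes such as counter
# and slot current.
def _example_metadata_sync(pkg):
        pkg.metadata["COUNTER"] = "42"
        assert pkg.counter == 42
        pkg.metadata["SLOT"] = "2"
        assert pkg.slot == "2"
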
1722 class EbuildFetchonly(SlotObject):
1723
1724         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1725
1726         def execute(self):
1727                 settings = self.settings
1728                 pkg = self.pkg
1729                 portdb = pkg.root_config.trees["porttree"].dbapi
1730                 ebuild_path = portdb.findname(pkg.cpv)
1731                 settings.setcpv(pkg)
1732                 debug = settings.get("PORTAGE_DEBUG") == "1"
1733                 use_cache = 1 # always true
1734                 portage.doebuild_environment(ebuild_path, "fetch",
1735                         settings["ROOT"], settings, debug, use_cache, portdb)
1736                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1737
1738                 if restrict_fetch:
1739                         rval = self._execute_with_builddir()
1740                 else:
1741                         rval = portage.doebuild(ebuild_path, "fetch",
1742                                 settings["ROOT"], settings, debug=debug,
1743                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1744                                 mydbapi=portdb, tree="porttree")
1745
1746                         if rval != os.EX_OK:
1747                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1748                                 eerror(msg, phase="unpack", key=pkg.cpv)
1749
1750                 return rval
1751
1752         def _execute_with_builddir(self):
1753                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR in order
1754                 # to ensure a sane $PWD (bug #239560) and to store elog
1755                 # messages. Use a private temp directory so that the main
1756                 # one does not need to be locked.
1757                 settings = self.settings
1758                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1759                 from tempfile import mkdtemp
1760                 try:
1761                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1762                 except OSError, e:
1763                         if e.errno != portage.exception.PermissionDenied.errno:
1764                                 raise
1765                         raise portage.exception.PermissionDenied(global_tmpdir)
1766                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1767                 settings.backup_changes("PORTAGE_TMPDIR")
1768                 try:
1769                         retval = self._execute()
1770                 finally:
1771                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1772                         settings.backup_changes("PORTAGE_TMPDIR")
1773                         shutil.rmtree(private_tmpdir)
1774                 return retval
1775
1776         def _execute(self):
1777                 settings = self.settings
1778                 pkg = self.pkg
1779                 root_config = pkg.root_config
1780                 portdb = root_config.trees["porttree"].dbapi
1781                 ebuild_path = portdb.findname(pkg.cpv)
1782                 debug = settings.get("PORTAGE_DEBUG") == "1"
1783                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1784
1785                 retval = portage.doebuild(ebuild_path, "fetch",
1786                         self.settings["ROOT"], self.settings, debug=debug,
1787                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1788                         mydbapi=portdb, tree="porttree")
1789
1790                 if retval != os.EX_OK:
1791                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1792                         eerror(msg, phase="unpack", key=pkg.cpv)
1793
1794                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1795                 return retval
1796
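# Illustrative sketch (not part of emerge): performing a --fetchonly style
# source fetch for a single ebuild package via the execute() method above.
# The pkg and settings arguments are assumed to be a Package instance and
# a config instance prepared by the caller.
def _example_fetchonly(pkg, settings, fetch_all=False, pretend=False):
        fetcher = EbuildFetchonly(fetch_all=fetch_all, pkg=pkg,
                pretend=pretend, settings=settings)
        return fetcher.execute()
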
1797 class PollConstants(object):
1798
1799         """
1800         Provides POLL* constants that are equivalent to those from the
1801         select module, for use by PollSelectAdapter.
1802         """
1803
1804         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1805         v = 1
1806         for k in names:
1807                 locals()[k] = getattr(select, k, v)
1808                 v *= 2
1809         del k, v
1810
1811 class AsynchronousTask(SlotObject):
1812         """
1813         Subclasses override _wait() and _poll() so that calls
1814         to public methods can be wrapped for implementing
1815         hooks such as exit listener notification.
1816
1817         Subclasses should call self.wait() to notify exit listeners after
1818         the task is complete and self.returncode has been set.
1819         """
1820
1821         __slots__ = ("background", "cancelled", "returncode") + \
1822                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1823
1824         def start(self):
1825                 """
1826                 Start an asynchronous task and then return as soon as possible.
1827                 """
1828                 self._start_hook()
1829                 self._start()
1830
1831         def _start(self):
1832                 raise NotImplementedError(self)
1833
1834         def isAlive(self):
1835                 return self.returncode is None
1836
1837         def poll(self):
1838                 self._wait_hook()
1839                 return self._poll()
1840
1841         def _poll(self):
1842                 return self.returncode
1843
1844         def wait(self):
1845                 if self.returncode is None:
1846                         self._wait()
1847                 self._wait_hook()
1848                 return self.returncode
1849
1850         def _wait(self):
1851                 return self.returncode
1852
1853         def cancel(self):
1854                 self.cancelled = True
1855                 self.wait()
1856
1857         def addStartListener(self, f):
1858                 """
1859                 The function will be called with one argument, a reference to self.
1860                 """
1861                 if self._start_listeners is None:
1862                         self._start_listeners = []
1863                 self._start_listeners.append(f)
1864
1865         def removeStartListener(self, f):
1866                 if self._start_listeners is None:
1867                         return
1868                 self._start_listeners.remove(f)
1869
1870         def _start_hook(self):
1871                 if self._start_listeners is not None:
1872                         start_listeners = self._start_listeners
1873                         self._start_listeners = None
1874
1875                         for f in start_listeners:
1876                                 f(self)
1877
1878         def addExitListener(self, f):
1879                 """
1880                 The function will be called with one argument, a reference to self.
1881                 """
1882                 if self._exit_listeners is None:
1883                         self._exit_listeners = []
1884                 self._exit_listeners.append(f)
1885
1886         def removeExitListener(self, f):
1887                 if self._exit_listeners is None:
1888                         if self._exit_listener_stack is not None:
1889                                 self._exit_listener_stack.remove(f)
1890                         return
1891                 self._exit_listeners.remove(f)
1892
1893         def _wait_hook(self):
1894                 """
1895                 Call this method after the task completes, just before returning
1896                 the returncode from wait() or poll(). This hook is
1897                 used to trigger exit listeners when the returncode first
1898                 becomes available.
1899                 """
1900                 if self.returncode is not None and \
1901                         self._exit_listeners is not None:
1902
1903                         # This prevents recursion, in case one of the
1904                         # exit handlers triggers this method again by
1905                         # calling wait(). Use a stack that gives
1906                         # removeExitListener() an opportunity to consume
1907                         # listeners from the stack, before they can get
1908                         # called below. This is necessary because a call
1909                         # to one exit listener may result in a call to
1910                         # removeExitListener() for another listener on
1911                         # the stack. That listener needs to be removed
1912                         # from the stack since it would be inconsistent
1913                         # to call it after it has been passed into
1914                         # removeExitListener().
1915                         self._exit_listener_stack = self._exit_listeners
1916                         self._exit_listeners = None
1917
1918                         self._exit_listener_stack.reverse()
1919                         while self._exit_listener_stack:
1920                                 self._exit_listener_stack.pop()(self)
1921
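# Illustrative sketch (hypothetical subclass): a minimal AsynchronousTask
# whose work completes synchronously inside _start(). Calling self.wait()
# after setting self.returncode is what triggers _wait_hook() and thereby
# any exit listeners registered via addExitListener().
class _ExampleNoopTask(AsynchronousTask):
        __slots__ = ()
        def _start(self):
                self.returncode = os.EX_OK
                self.wait()
# Usage sketch:
#   task = _ExampleNoopTask()
#   task.addExitListener(lambda t: writemsg("returncode: %s\n" % t.returncode))
#   task.start()
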
1922 class AbstractPollTask(AsynchronousTask):
1923
1924         __slots__ = ("scheduler",) + \
1925                 ("_registered",)
1926
1927         _bufsize = 4096
1928         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1929         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1930                 _exceptional_events
1931
1932         def _unregister(self):
1933                 raise NotImplementedError(self)
1934
1935         def _unregister_if_appropriate(self, event):
1936                 if self._registered:
1937                         if event & self._exceptional_events:
1938                                 self._unregister()
1939                                 self.cancel()
1940                         elif event & PollConstants.POLLHUP:
1941                                 self._unregister()
1942                                 self.wait()
1943
1944 class PipeReader(AbstractPollTask):
1945
1946         """
1947         Reads output from one or more files and saves it in memory,
1948         for retrieval via the getvalue() method. This is driven by
1949         the scheduler's poll() loop, so it runs entirely within the
1950         current process.
1951         """
1952
1953         __slots__ = ("input_files",) + \
1954                 ("_read_data", "_reg_ids")
1955
1956         def _start(self):
1957                 self._reg_ids = set()
1958                 self._read_data = []
1959                 for k, f in self.input_files.iteritems():
1960                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1961                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1962                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1963                                 self._registered_events, self._output_handler))
1964                 self._registered = True
1965
1966         def isAlive(self):
1967                 return self._registered
1968
1969         def cancel(self):
1970                 if self.returncode is None:
1971                         self.returncode = 1
1972                         self.cancelled = True
1973                 self.wait()
1974
1975         def _wait(self):
1976                 if self.returncode is not None:
1977                         return self.returncode
1978
1979                 if self._registered:
1980                         self.scheduler.schedule(self._reg_ids)
1981                         self._unregister()
1982
1983                 self.returncode = os.EX_OK
1984                 return self.returncode
1985
1986         def getvalue(self):
1987                 """Retrieve the entire contents"""
1988                 if sys.hexversion >= 0x3000000:
1989                         return bytes().join(self._read_data)
1990                 return "".join(self._read_data)
1991
1992         def close(self):
1993                 """Free the memory buffer."""
1994                 self._read_data = None
1995
1996         def _output_handler(self, fd, event):
1997
1998                 if event & PollConstants.POLLIN:
1999
2000                         for f in self.input_files.itervalues():
2001                                 if fd == f.fileno():
2002                                         break
2003
2004                         buf = array.array('B')
2005                         try:
2006                                 buf.fromfile(f, self._bufsize)
2007                         except EOFError:
2008                                 pass
2009
2010                         if buf:
2011                                 self._read_data.append(buf.tostring())
2012                         else:
2013                                 self._unregister()
2014                                 self.wait()
2015
2016                 self._unregister_if_appropriate(event)
2017                 return self._registered
2018
2019         def _unregister(self):
2020                 """
2021                 Unregister from the scheduler and close open files.
2022                 """
2023
2024                 self._registered = False
2025
2026                 if self._reg_ids is not None:
2027                         for reg_id in self._reg_ids:
2028                                 self.scheduler.unregister(reg_id)
2029                         self._reg_ids = None
2030
2031                 if self.input_files is not None:
2032                         for f in self.input_files.itervalues():
2033                                 f.close()
2034                         self.input_files = None
2035
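# Illustrative sketch (assumption): collecting the output from a pipe in
# memory with PipeReader. The scheduler argument is assumed to provide the
# register()/schedule()/unregister() interface used by _start() and
# _wait() above, and master_fd is the read end of an os.pipe().
def _example_read_pipe(scheduler, master_fd):
        reader = PipeReader(
                input_files={"pipe_read": os.fdopen(master_fd, 'rb')},
                scheduler=scheduler)
        reader.start()
        reader.wait()
        output = reader.getvalue()
        reader.close()
        return output
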
2036 class CompositeTask(AsynchronousTask):
2037
2038         __slots__ = ("scheduler",) + ("_current_task",)
2039
2040         def isAlive(self):
2041                 return self._current_task is not None
2042
2043         def cancel(self):
2044                 self.cancelled = True
2045                 if self._current_task is not None:
2046                         self._current_task.cancel()
2047
2048         def _poll(self):
2049                 """
2050                 This does a loop calling self._current_task.poll()
2051                 repeatedly as long as the value of self._current_task
2052                 keeps changing. It calls poll() a maximum of one time
2053                 for a given self._current_task instance. This is useful
2054                 since calling poll() on a task can trigger advancement to
2055                 the next task, which could eventually lead to the returncode
2056                 being set in cases where polling only a single task would
2057                 not have the same effect.
2058                 """
2059
2060                 prev = None
2061                 while True:
2062                         task = self._current_task
2063                         if task is None or task is prev:
2064                                 # don't poll the same task more than once
2065                                 break
2066                         task.poll()
2067                         prev = task
2068
2069                 return self.returncode
2070
2071         def _wait(self):
2072
2073                 prev = None
2074                 while True:
2075                         task = self._current_task
2076                         if task is None:
2077                                 # no task is currently running
2078                                 break
2079                         if task is prev:
2080                                 # Before the task.wait() method returned, an exit
2081                                 # listener should have set self._current_task to either
2082                                 # a different task or None. Something is wrong.
2083                                 raise AssertionError("self._current_task has not " + \
2084                                         "changed since calling wait", self, task)
2085                         task.wait()
2086                         prev = task
2087
2088                 return self.returncode
2089
2090         def _assert_current(self, task):
2091                 """
2092                 Raises an AssertionError if the given task is not the
2093                 same one as self._current_task. This can be useful
2094                 for detecting bugs.
2095                 """
2096                 if task is not self._current_task:
2097                         raise AssertionError("Unrecognized task: %s" % (task,))
2098
2099         def _default_exit(self, task):
2100                 """
2101                 Calls _assert_current() on the given task and then sets the
2102                 composite returncode attribute if task.returncode != os.EX_OK.
2103                 If the task failed then self._current_task will be set to None.
2104                 Subclasses can use this as a generic task exit callback.
2105
2106                 @rtype: int
2107                 @returns: The task.returncode attribute.
2108                 """
2109                 self._assert_current(task)
2110                 if task.returncode != os.EX_OK:
2111                         self.returncode = task.returncode
2112                         self._current_task = None
2113                 return task.returncode
2114
2115         def _final_exit(self, task):
2116                 """
2117                 Assumes that task is the final task of this composite task.
2118                 Calls _default_exit() and sets self.returncode to the task's
2119                 returncode and sets self._current_task to None.
2120                 """
2121                 self._default_exit(task)
2122                 self._current_task = None
2123                 self.returncode = task.returncode
2124                 return self.returncode
2125
2126         def _default_final_exit(self, task):
2127                 """
2128                 This calls _final_exit() and then wait().
2129
2130                 Subclasses can use this as a generic final task exit callback.
2131
2132                 """
2133                 self._final_exit(task)
2134                 return self.wait()
2135
2136         def _start_task(self, task, exit_handler):
2137                 """
2138                 Register exit handler for the given task, set it
2139                 as self._current_task, and call task.start().
2140
2141                 Subclasses can use this as a generic way to start
2142                 a task.
2143
2144                 """
2145                 task.addExitListener(exit_handler)
2146                 self._current_task = task
2147                 task.start()
2148
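# Illustrative sketch (hypothetical subclass): the usual CompositeTask
# pattern chains _start_task() calls through exit handlers, letting
# _default_exit() short-circuit on failure and _default_final_exit()
# finish the composite. The first_task and second_task slots are assumed
# to hold unstarted AsynchronousTask instances supplied by the caller.
class _ExampleTwoStepTask(CompositeTask):
        __slots__ = ("first_task", "second_task")
        def _start(self):
                self._start_task(self.first_task, self._first_exit)
        def _first_exit(self, first_task):
                if self._default_exit(first_task) != os.EX_OK:
                        self.wait()
                        return
                self._start_task(self.second_task, self._default_final_exit)
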
2149 class TaskSequence(CompositeTask):
2150         """
2151         A collection of tasks that executes sequentially. Each task
2152         must have an addExitListener() method that can be used as
2153         a means to trigger movement from one task to the next.
2154         """
2155
2156         __slots__ = ("_task_queue",)
2157
2158         def __init__(self, **kwargs):
2159                 AsynchronousTask.__init__(self, **kwargs)
2160                 self._task_queue = deque()
2161
2162         def add(self, task):
2163                 self._task_queue.append(task)
2164
2165         def _start(self):
2166                 self._start_next_task()
2167
2168         def cancel(self):
2169                 self._task_queue.clear()
2170                 CompositeTask.cancel(self)
2171
2172         def _start_next_task(self):
2173                 self._start_task(self._task_queue.popleft(),
2174                         self._task_exit_handler)
2175
2176         def _task_exit_handler(self, task):
2177                 if self._default_exit(task) != os.EX_OK:
2178                         self.wait()
2179                 elif self._task_queue:
2180                         self._start_next_task()
2181                 else:
2182                         self._final_exit(task)
2183                         self.wait()
2184
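# Illustrative sketch (not part of emerge): running a list of tasks one
# after another with TaskSequence, stopping at the first failure as
# implemented by _task_exit_handler() above.
def _example_run_sequence(scheduler, tasks):
        seq = TaskSequence(scheduler=scheduler)
        for task in tasks:
                seq.add(task)
        seq.start()
        return seq.wait()
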
2185 class SubProcess(AbstractPollTask):
2186
2187         __slots__ = ("pid",) + \
2188                 ("_files", "_reg_id")
2189
2190         # A file descriptor is required for the scheduler to monitor changes from
2191         # inside a poll() loop. When logging is not enabled, create a pipe just to
2192         # serve this purpose alone.
2193         _dummy_pipe_fd = 9
2194
2195         def _poll(self):
2196                 if self.returncode is not None:
2197                         return self.returncode
2198                 if self.pid is None:
2199                         return self.returncode
2200                 if self._registered:
2201                         return self.returncode
2202
2203                 try:
2204                         retval = os.waitpid(self.pid, os.WNOHANG)
2205                 except OSError, e:
2206                         if e.errno != errno.ECHILD:
2207                                 raise
2208                         del e
2209                         retval = (self.pid, 1)
2210
2211                 if retval == (0, 0):
2212                         return None
2213                 self._set_returncode(retval)
2214                 return self.returncode
2215
2216         def cancel(self):
2217                 if self.isAlive():
2218                         try:
2219                                 os.kill(self.pid, signal.SIGTERM)
2220                         except OSError, e:
2221                                 if e.errno != errno.ESRCH:
2222                                         raise
2223                                 del e
2224
2225                 self.cancelled = True
2226                 if self.pid is not None:
2227                         self.wait()
2228                 return self.returncode
2229
2230         def isAlive(self):
2231                 return self.pid is not None and \
2232                         self.returncode is None
2233
2234         def _wait(self):
2235
2236                 if self.returncode is not None:
2237                         return self.returncode
2238
2239                 if self._registered:
2240                         self.scheduler.schedule(self._reg_id)
2241                         self._unregister()
2242                         if self.returncode is not None:
2243                                 return self.returncode
2244
2245                 try:
2246                         wait_retval = os.waitpid(self.pid, 0)
2247                 except OSError, e:
2248                         if e.errno != errno.ECHILD:
2249                                 raise
2250                         del e
2251                         self._set_returncode((self.pid, 1))
2252                 else:
2253                         self._set_returncode(wait_retval)
2254
2255                 return self.returncode
2256
2257         def _unregister(self):
2258                 """
2259                 Unregister from the scheduler and close open files.
2260                 """
2261
2262                 self._registered = False
2263
2264                 if self._reg_id is not None:
2265                         self.scheduler.unregister(self._reg_id)
2266                         self._reg_id = None
2267
2268                 if self._files is not None:
2269                         for f in self._files.itervalues():
2270                                 f.close()
2271                         self._files = None
2272
2273         def _set_returncode(self, wait_retval):
2274
2275                 retval = wait_retval[1]
2276
2277                 if retval != os.EX_OK:
2278                         if retval & 0xff:
2279                                 retval = (retval & 0xff) << 8
2280                         else:
2281                                 retval = retval >> 8
2282
2283                 self.returncode = retval
2284
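# Illustrative sketch: a standalone mirror of the _set_returncode()
# conversion above, turning a raw os.waitpid() status into the
# shell-style returncode stored on the task.
def _example_returncode(status):
        if status == os.EX_OK:
                return os.EX_OK
        if status & 0xff:
                # Killed by a signal: the signal number is shifted into
                # the high byte.
                return (status & 0xff) << 8
        # Normal exit: the exit code lives in the high byte of the status.
        return status >> 8
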
2285 class SpawnProcess(SubProcess):
2286
2287         """
2288         Constructor keyword args are passed into portage.process.spawn().
2289         The required "args" keyword argument will be passed as the first
2290         spawn() argument.
2291         """
2292
2293         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2294                 "uid", "gid", "groups", "umask", "logfile",
2295                 "path_lookup", "pre_exec")
2296
2297         __slots__ = ("args",) + \
2298                 _spawn_kwarg_names
2299
2300         _file_names = ("log", "process", "stdout")
2301         _files_dict = slot_dict_class(_file_names, prefix="")
2302
2303         def _start(self):
2304
2305                 if self.cancelled:
2306                         return
2307
2308                 if self.fd_pipes is None:
2309                         self.fd_pipes = {}
2310                 fd_pipes = self.fd_pipes
2311                 fd_pipes.setdefault(0, sys.stdin.fileno())
2312                 fd_pipes.setdefault(1, sys.stdout.fileno())
2313                 fd_pipes.setdefault(2, sys.stderr.fileno())
2314
2315                 # flush any pending output
2316                 for fd in fd_pipes.itervalues():
2317                         if fd == sys.stdout.fileno():
2318                                 sys.stdout.flush()
2319                         if fd == sys.stderr.fileno():
2320                                 sys.stderr.flush()
2321
2322                 logfile = self.logfile
2323                 self._files = self._files_dict()
2324                 files = self._files
2325
2326                 master_fd, slave_fd = self._pipe(fd_pipes)
2327                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2328                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2329
2330                 null_input = None
2331                 fd_pipes_orig = fd_pipes.copy()
2332                 if self.background:
2333                         # TODO: Use job control functions like tcsetpgrp() to control
2334                         # access to stdin. Until then, use /dev/null so that any
2335                         # attempts to read from stdin will immediately return EOF
2336                         # instead of blocking indefinitely.
2337                         null_input = open('/dev/null', 'rb')
2338                         fd_pipes[0] = null_input.fileno()
2339                 else:
2340                         fd_pipes[0] = fd_pipes_orig[0]
2341
2342                 files.process = os.fdopen(master_fd, 'rb')
2343                 if logfile is not None:
2344
2345                         fd_pipes[1] = slave_fd
2346                         fd_pipes[2] = slave_fd
2347
2348                         files.log = open(logfile, mode='ab')
2349                         portage.util.apply_secpass_permissions(logfile,
2350                                 uid=portage.portage_uid, gid=portage.portage_gid,
2351                                 mode=0660)
2352
2353                         if not self.background:
2354                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2355
2356                         output_handler = self._output_handler
2357
2358                 else:
2359
2360                         # Create a dummy pipe so the scheduler can monitor
2361                         # the process from inside a poll() loop.
2362                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2363                         if self.background:
2364                                 fd_pipes[1] = slave_fd
2365                                 fd_pipes[2] = slave_fd
2366                         output_handler = self._dummy_handler
2367
2368                 kwargs = {}
2369                 for k in self._spawn_kwarg_names:
2370                         v = getattr(self, k)
2371                         if v is not None:
2372                                 kwargs[k] = v
2373
2374                 kwargs["fd_pipes"] = fd_pipes
2375                 kwargs["returnpid"] = True
2376                 kwargs.pop("logfile", None)
2377
2378                 self._reg_id = self.scheduler.register(files.process.fileno(),
2379                         self._registered_events, output_handler)
2380                 self._registered = True
2381
2382                 retval = self._spawn(self.args, **kwargs)
2383
2384                 os.close(slave_fd)
2385                 if null_input is not None:
2386                         null_input.close()
2387
2388                 if isinstance(retval, int):
2389                         # spawn failed
2390                         self._unregister()
2391                         self.returncode = retval
2392                         self.wait()
2393                         return
2394
2395                 self.pid = retval[0]
2396                 portage.process.spawned_pids.remove(self.pid)
2397
2398         def _pipe(self, fd_pipes):
2399                 """
2400                 @type fd_pipes: dict
2401                 @param fd_pipes: pipes from which to copy terminal size if desired.
2402                 """
2403                 return os.pipe()
2404
2405         def _spawn(self, args, **kwargs):
2406                 return portage.process.spawn(args, **kwargs)
2407
2408         def _output_handler(self, fd, event):
2409
2410                 if event & PollConstants.POLLIN:
2411
2412                         files = self._files
2413                         buf = array.array('B')
2414                         try:
2415                                 buf.fromfile(files.process, self._bufsize)
2416                         except EOFError:
2417                                 pass
2418
2419                         if buf:
2420                                 if not self.background:
2421                                         buf.tofile(files.stdout)
2422                                         files.stdout.flush()
2423                                 buf.tofile(files.log)
2424                                 files.log.flush()
2425                         else:
2426                                 self._unregister()
2427                                 self.wait()
2428
2429                 self._unregister_if_appropriate(event)
2430                 return self._registered
2431
2432         def _dummy_handler(self, fd, event):
2433                 """
2434                 This method is mainly interested in detecting EOF, since
2435                 the only purpose of the pipe is to allow the scheduler to
2436                 monitor the process from inside a poll() loop.
2437                 """
2438
2439                 if event & PollConstants.POLLIN:
2440
2441                         buf = array.array('B')
2442                         try:
2443                                 buf.fromfile(self._files.process, self._bufsize)
2444                         except EOFError:
2445                                 pass
2446
2447                         if buf:
2448                                 pass
2449                         else:
2450                                 self._unregister()
2451                                 self.wait()
2452
2453                 self._unregister_if_appropriate(event)
2454                 return self._registered
2455
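# Illustrative sketch (assumption): running an external command through
# SpawnProcess. The scheduler is assumed to provide the poll-loop
# interface used by SubProcess, and the logfile path here is purely
# hypothetical.
def _example_spawn(scheduler):
        proc = SpawnProcess(args=["/bin/true"],
                scheduler=scheduler, logfile="/tmp/spawn-example.log")
        proc.start()
        return proc.wait()
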
2456 class MiscFunctionsProcess(SpawnProcess):
2457         """
2458         Spawns misc-functions.sh with an existing ebuild environment.
2459         """
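             # Usage sketch (editor's addition): EbuildPhase._ebuild_exit() below
             # constructs this task from portage._post_phase_cmds, roughly:
             #
             #     post_phase = MiscFunctionsProcess(background=background,
             #         commands=post_phase_cmds, phase=phase, pkg=pkg,
             #         scheduler=scheduler, settings=settings)
             #     post_phase.start()
             #
             # where post_phase_cmds is the list of misc-functions.sh function
             # names registered for the current phase.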
2460
2461         __slots__ = ("commands", "phase", "pkg", "settings")
2462
2463         def _start(self):
2464                 settings = self.settings
2465                 settings.pop("EBUILD_PHASE", None)
2466                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2467                 misc_sh_binary = os.path.join(portage_bin_path,
2468                         os.path.basename(portage.const.MISC_SH_BINARY))
2469
2470                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2471                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2472
2473                 portage._doebuild_exit_status_unlink(
2474                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2475
2476                 SpawnProcess._start(self)
2477
2478         def _spawn(self, args, **kwargs):
2479                 settings = self.settings
2480                 debug = settings.get("PORTAGE_DEBUG") == "1"
2481                 return portage.spawn(" ".join(args), settings,
2482                         debug=debug, **kwargs)
2483
2484         def _set_returncode(self, wait_retval):
2485                 SpawnProcess._set_returncode(self, wait_retval)
2486                 self.returncode = portage._doebuild_exit_status_check_and_log(
2487                         self.settings, self.phase, self.returncode)
2488
2489 class EbuildFetcher(SpawnProcess):
2490
2491         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2492                 ("_build_dir",)
2493
2494         def _start(self):
2495
2496                 root_config = self.pkg.root_config
2497                 portdb = root_config.trees["porttree"].dbapi
2498                 ebuild_path = portdb.findname(self.pkg.cpv)
2499                 settings = self.config_pool.allocate()
2500                 settings.setcpv(self.pkg)
2501
2502                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2503                 # should not be touched since otherwise it could interfere with
2504                 # another instance of the same cpv concurrently being built for a
2505                 # different $ROOT (currently, builds only cooperate with prefetchers
2506                 # that are spawned for the same $ROOT).
2507                 if not self.prefetch:
2508                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2509                         self._build_dir.lock()
2510                         self._build_dir.clean()
2511                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2512                         if self.logfile is None:
2513                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2514
2515                 phase = "fetch"
2516                 if self.fetchall:
2517                         phase = "fetchall"
2518
2519                 # If any incremental variables have been overridden
2520                 # via the environment, those values need to be passed
2521                 # along here so that they are correctly considered by
2522                 # the config instance in the subprocess.
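                     # For example (editor's note), running `USE="doc" emerge foo`
                     # leaves USE in os.environ; copying the whole environment below
                     # lets the `ebuild ... fetch` subprocess re-apply that override
                     # when it constructs its own config instance.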
2523                 fetch_env = os.environ.copy()
2524
2525                 nocolor = settings.get("NOCOLOR")
2526                 if nocolor is not None:
2527                         fetch_env["NOCOLOR"] = nocolor
2528
2529                 fetch_env["PORTAGE_NICENESS"] = "0"
2530                 if self.prefetch:
2531                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2532
2533                 ebuild_binary = os.path.join(
2534                         settings["PORTAGE_BIN_PATH"], "ebuild")
2535
2536                 fetch_args = [ebuild_binary, ebuild_path, phase]
2537                 debug = settings.get("PORTAGE_DEBUG") == "1"
2538                 if debug:
2539                         fetch_args.append("--debug")
2540
2541                 self.args = fetch_args
2542                 self.env = fetch_env
2543                 SpawnProcess._start(self)
2544
2545         def _pipe(self, fd_pipes):
2546                 """When appropriate, use a pty so that fetcher progress bars,
2547                 like the one wget displays, will work properly."""
2548                 if self.background or not sys.stdout.isatty():
2549                         # When the output only goes to a log file,
2550                         # there's no point in creating a pty.
2551                         return os.pipe()
2552                 stdout_pipe = fd_pipes.get(1)
2553                 got_pty, master_fd, slave_fd = \
2554                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2555                 return (master_fd, slave_fd)
2556
2557         def _set_returncode(self, wait_retval):
2558                 SpawnProcess._set_returncode(self, wait_retval)
2559                 # Collect elog messages that might have been
2560                 # created by the pkg_nofetch phase.
2561                 if self._build_dir is not None:
2562                         # Skip elog messages for prefetch, in order to avoid duplicates.
2563                         if not self.prefetch and self.returncode != os.EX_OK:
2564                                 elog_out = None
2565                                 if self.logfile is not None:
2566                                         if self.background:
2567                                                 elog_out = open(self.logfile, 'a')
2568                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2569                                 if self.logfile is not None:
2570                                         msg += ", Log file:"
2571                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2572                                 if self.logfile is not None:
2573                                         eerror(" '%s'" % (self.logfile,),
2574                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2575                                 if elog_out is not None:
2576                                         elog_out.close()
2577                         if not self.prefetch:
2578                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2579                         features = self._build_dir.settings.features
2580                         if self.returncode == os.EX_OK:
2581                                 self._build_dir.clean()
2582                         self._build_dir.unlock()
2583                         self.config_pool.deallocate(self._build_dir.settings)
2584                         self._build_dir = None
2585
2586 class EbuildBuildDir(SlotObject):
2587
2588         __slots__ = ("dir_path", "pkg", "settings",
2589                 "locked", "_catdir", "_lock_obj")
2590
2591         def __init__(self, **kwargs):
2592                 SlotObject.__init__(self, **kwargs)
2593                 self.locked = False
2594
2595         def lock(self):
2596                 """
2597                 This raises an AlreadyLocked exception if lock() is called
2598                 while a lock is already held. In order to avoid this, call
2599                 unlock() or check whether the "locked" attribute is True
2600                 or False before calling lock().
2601                 """
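                     # Caller sketch (editor's addition), mirroring the pattern used
                     # elsewhere in this file:
                     #
                     #     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
                     #     build_dir.lock()
                     #     try:
                     #         ...  # work inside PORTAGE_BUILDDIR
                     #     finally:
                     #         build_dir.unlock()
                     #
                     # Checking build_dir.locked first avoids AlreadyLocked when the
                     # lock might already be held.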
2602                 if self._lock_obj is not None:
2603                         raise self.AlreadyLocked((self._lock_obj,))
2604
2605                 dir_path = self.dir_path
2606                 if dir_path is None:
2607                         root_config = self.pkg.root_config
2608                         portdb = root_config.trees["porttree"].dbapi
2609                         ebuild_path = portdb.findname(self.pkg.cpv)
2610                         settings = self.settings
2611                         settings.setcpv(self.pkg)
2612                         debug = settings.get("PORTAGE_DEBUG") == "1"
2613                         use_cache = 1 # always true
2614                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615                                 self.settings, debug, use_cache, portdb)
2616                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2617
2618                 catdir = os.path.dirname(dir_path)
2619                 self._catdir = catdir
2620
2621                 portage.util.ensure_dirs(os.path.dirname(catdir),
2622                         gid=portage.portage_gid,
2623                         mode=070, mask=0)
2624                 catdir_lock = None
2625                 try:
2626                         catdir_lock = portage.locks.lockdir(catdir)
2627                         portage.util.ensure_dirs(catdir,
2628                                 gid=portage.portage_gid,
2629                                 mode=070, mask=0)
2630                         self._lock_obj = portage.locks.lockdir(dir_path)
2631                 finally:
2632                         self.locked = self._lock_obj is not None
2633                         if catdir_lock is not None:
2634                                 portage.locks.unlockdir(catdir_lock)
2635
2636         def clean(self):
2637                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638                 by keepwork or keeptemp in FEATURES."""
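                     # Configuration example (editor's note): setting
                     # FEATURES="keepwork" (or "keeptemp") in make.conf is the usual
                     # way to trigger the check below, in which case the whole
                     # PORTAGE_BUILDDIR is left in place for inspection.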
2639                 settings = self.settings
2640                 features = settings.features
2641                 if not ("keepwork" in features or "keeptemp" in features):
2642                         try:
2643                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2644                         except EnvironmentError, e:
2645                                 if e.errno != errno.ENOENT:
2646                                         raise
2647                                 del e
2648
2649         def unlock(self):
2650                 if self._lock_obj is None:
2651                         return
2652
2653                 portage.locks.unlockdir(self._lock_obj)
2654                 self._lock_obj = None
2655                 self.locked = False
2656
2657                 catdir = self._catdir
2658                 catdir_lock = None
2659                 try:
2660                         catdir_lock = portage.locks.lockdir(catdir)
2661                 finally:
2662                         if catdir_lock:
2663                                 try:
2664                                         os.rmdir(catdir)
2665                                 except OSError, e:
2666                                         if e.errno not in (errno.ENOENT,
2667                                                 errno.ENOTEMPTY, errno.EEXIST):
2668                                                 raise
2669                                         del e
2670                                 portage.locks.unlockdir(catdir_lock)
2671
2672         class AlreadyLocked(portage.exception.PortageException):
2673                 pass
2674
2675 class EbuildBuild(CompositeTask):
2676
2677         __slots__ = ("args_set", "config_pool", "find_blockers",
2678                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2679                 "prefetcher", "settings", "world_atom") + \
2680                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2681
2682         def _start(self):
2683
2684                 logger = self.logger
2685                 opts = self.opts
2686                 pkg = self.pkg
2687                 settings = self.settings
2688                 world_atom = self.world_atom
2689                 root_config = pkg.root_config
2690                 tree = "porttree"
2691                 self._tree = tree
2692                 portdb = root_config.trees[tree].dbapi
2693                 settings.setcpv(pkg)
2694                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2695                 ebuild_path = portdb.findname(self.pkg.cpv)
2696                 self._ebuild_path = ebuild_path
2697
2698                 prefetcher = self.prefetcher
2699                 if prefetcher is None:
2700                         pass
2701                 elif not prefetcher.isAlive():
2702                         prefetcher.cancel()
2703                 elif prefetcher.poll() is None:
2704
2705                         waiting_msg = "Fetching files " + \
2706                                 "in the background. " + \
2707                                 "To view fetch progress, run `tail -f " + \
2708                                 "/var/log/emerge-fetch.log` in another " + \
2709                                 "terminal."
2710                         msg_prefix = colorize("GOOD", " * ")
2711                         from textwrap import wrap
2712                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2713                                 for line in wrap(waiting_msg, 65))
2714                         if not self.background:
2715                                 writemsg(waiting_msg, noiselevel=-1)
2716
2717                         self._current_task = prefetcher
2718                         prefetcher.addExitListener(self._prefetch_exit)
2719                         return
2720
2721                 self._prefetch_exit(prefetcher)
2722
2723         def _prefetch_exit(self, prefetcher):
2724
2725                 opts = self.opts
2726                 pkg = self.pkg
2727                 settings = self.settings
2728
2729                 if opts.fetchonly:
2730                         fetcher = EbuildFetchonly(
2731                                 fetch_all=opts.fetch_all_uri,
2732                                 pkg=pkg, pretend=opts.pretend,
2733                                 settings=settings)
2734                         retval = fetcher.execute()
2735                         self.returncode = retval
2736                         self.wait()
2737                         return
2738
2739                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2740                         fetchall=opts.fetch_all_uri,
2741                         fetchonly=opts.fetchonly,
2742                         background=self.background,
2743                         pkg=pkg, scheduler=self.scheduler)
2744
2745                 self._start_task(fetcher, self._fetch_exit)
2746
2747         def _fetch_exit(self, fetcher):
2748                 opts = self.opts
2749                 pkg = self.pkg
2750
2751                 fetch_failed = False
2752                 if opts.fetchonly:
2753                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2754                 else:
2755                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2756
2757                 if fetch_failed and fetcher.logfile is not None and \
2758                         os.path.exists(fetcher.logfile):
2759                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2760
2761                 if not fetch_failed and fetcher.logfile is not None:
2762                         # Fetch was successful, so remove the fetch log.
2763                         try:
2764                                 os.unlink(fetcher.logfile)
2765                         except OSError:
2766                                 pass
2767
2768                 if fetch_failed or opts.fetchonly:
2769                         self.wait()
2770                         return
2771
2772                 logger = self.logger
2773                 opts = self.opts
2774                 pkg_count = self.pkg_count
2775                 scheduler = self.scheduler
2776                 settings = self.settings
2777                 features = settings.features
2778                 ebuild_path = self._ebuild_path
2779                 system_set = pkg.root_config.sets["system"]
2780
2781                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2782                 self._build_dir.lock()
2783
2784                 # Cleaning is triggered before the setup
2785                 # phase, in portage.doebuild().
2786                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2787                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2788                 short_msg = "emerge: (%s of %s) %s Clean" % \
2789                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2790                 logger.log(msg, short_msg=short_msg)
2791
2792                 # buildsyspkg: Check if we need to _force_ binary package creation
2793                 self._issyspkg = "buildsyspkg" in features and \
2794                                 system_set.findAtomForPackage(pkg) and \
2795                                 not opts.buildpkg
2796
2797                 if opts.buildpkg or self._issyspkg:
2798
2799                         self._buildpkg = True
2800
2801                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2802                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2803                         short_msg = "emerge: (%s of %s) %s Compile" % \
2804                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2805                         logger.log(msg, short_msg=short_msg)
2806
2807                 else:
2808                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2809                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2810                         short_msg = "emerge: (%s of %s) %s Compile" % \
2811                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2812                         logger.log(msg, short_msg=short_msg)
2813
2814                 build = EbuildExecuter(background=self.background, pkg=pkg,
2815                         scheduler=scheduler, settings=settings)
2816                 self._start_task(build, self._build_exit)
2817
2818         def _unlock_builddir(self):
2819                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2820                 self._build_dir.unlock()
2821
2822         def _build_exit(self, build):
2823                 if self._default_exit(build) != os.EX_OK:
2824                         self._unlock_builddir()
2825                         self.wait()
2826                         return
2827
2828                 opts = self.opts
2829                 buildpkg = self._buildpkg
2830
2831                 if not buildpkg:
2832                         self._final_exit(build)
2833                         self.wait()
2834                         return
2835
2836                 if self._issyspkg:
2837                         msg = ">>> This is a system package, " + \
2838                                 "let's pack a rescue tarball.\n"
2839
2840                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2841                         if log_path is not None:
2842                                 log_file = open(log_path, 'a')
2843                                 try:
2844                                         log_file.write(msg)
2845                                 finally:
2846                                         log_file.close()
2847
2848                         if not self.background:
2849                                 portage.writemsg_stdout(msg, noiselevel=-1)
2850
2851                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2852                         scheduler=self.scheduler, settings=self.settings)
2853
2854                 self._start_task(packager, self._buildpkg_exit)
2855
2856         def _buildpkg_exit(self, packager):
2857                 """
2858                 Release the build dir lock when there is a failure or
2859                 when in buildpkgonly mode. Otherwise, the lock will
2860                 be released when merge() is called.
2861                 """
2862
2863                 if self._default_exit(packager) != os.EX_OK:
2864                         self._unlock_builddir()
2865                         self.wait()
2866                         return
2867
2868                 if self.opts.buildpkgonly:
2869                         # Need to call "clean" phase for buildpkgonly mode
2870                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2871                         phase = "clean"
2872                         clean_phase = EbuildPhase(background=self.background,
2873                                 pkg=self.pkg, phase=phase,
2874                                 scheduler=self.scheduler, settings=self.settings,
2875                                 tree=self._tree)
2876                         self._start_task(clean_phase, self._clean_exit)
2877                         return
2878
2879                 # Continue holding the builddir lock until
2880                 # after the package has been installed.
2881                 self._current_task = None
2882                 self.returncode = packager.returncode
2883                 self.wait()
2884
2885         def _clean_exit(self, clean_phase):
2886                 if self._final_exit(clean_phase) != os.EX_OK or \
2887                         self.opts.buildpkgonly:
2888                         self._unlock_builddir()
2889                 self.wait()
2890
2891         def install(self):
2892                 """
2893                 Install the package and then clean up and release locks.
2894                 Only call this after the build has completed successfully
2895                 and neither fetchonly nor buildpkgonly mode is enabled.
2896                 """
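                     # Call-order sketch (editor's addition, assuming a
                     # scheduler-style caller):
                     #
                     #     build = EbuildBuild(pkg=pkg, settings=settings, ...)
                     #     build.start()
                     #     # ... wait until the build task completes ...
                     #     if build.returncode == os.EX_OK:
                     #         build.install()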
2897
2898                 find_blockers = self.find_blockers
2899                 ldpath_mtimes = self.ldpath_mtimes
2900                 logger = self.logger
2901                 pkg = self.pkg
2902                 pkg_count = self.pkg_count
2903                 settings = self.settings
2904                 world_atom = self.world_atom
2905                 ebuild_path = self._ebuild_path
2906                 tree = self._tree
2907
2908                 merge = EbuildMerge(find_blockers=self.find_blockers,
2909                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2910                         pkg_count=pkg_count, pkg_path=ebuild_path,
2911                         scheduler=self.scheduler,
2912                         settings=settings, tree=tree, world_atom=world_atom)
2913
2914                 msg = " === (%s of %s) Merging (%s::%s)" % \
2915                         (pkg_count.curval, pkg_count.maxval,
2916                         pkg.cpv, ebuild_path)
2917                 short_msg = "emerge: (%s of %s) %s Merge" % \
2918                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2919                 logger.log(msg, short_msg=short_msg)
2920
2921                 try:
2922                         rval = merge.execute()
2923                 finally:
2924                         self._unlock_builddir()
2925
2926                 return rval
2927
2928 class EbuildExecuter(CompositeTask):
2929
2930         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2931
2932         _phases = ("prepare", "configure", "compile", "test", "install")
2933
2934         _live_eclasses = frozenset([
2935                 "bzr",
2936                 "cvs",
2937                 "darcs",
2938                 "git",
2939                 "mercurial",
2940                 "subversion"
2941         ])
2942
2943         def _start(self):
2944                 self._tree = "porttree"
2945                 pkg = self.pkg
2946                 phase = "clean"
2947                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2948                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2949                 self._start_task(clean_phase, self._clean_phase_exit)
2950
2951         def _clean_phase_exit(self, clean_phase):
2952
2953                 if self._default_exit(clean_phase) != os.EX_OK:
2954                         self.wait()
2955                         return
2956
2957                 pkg = self.pkg
2958                 scheduler = self.scheduler
2959                 settings = self.settings
2960                 cleanup = 1
2961
2962                 # This initializes PORTAGE_LOG_FILE.
2963                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2964
2965                 setup_phase = EbuildPhase(background=self.background,
2966                         pkg=pkg, phase="setup", scheduler=scheduler,
2967                         settings=settings, tree=self._tree)
2968
2969                 setup_phase.addExitListener(self._setup_exit)
2970                 self._current_task = setup_phase
2971                 self.scheduler.scheduleSetup(setup_phase)
2972
2973         def _setup_exit(self, setup_phase):
2974
2975                 if self._default_exit(setup_phase) != os.EX_OK:
2976                         self.wait()
2977                         return
2978
2979                 unpack_phase = EbuildPhase(background=self.background,
2980                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2981                         settings=self.settings, tree=self._tree)
2982
2983                 if self._live_eclasses.intersection(self.pkg.inherited):
2984                         # Serialize $DISTDIR access for live ebuilds since
2985                         # otherwise they can interfere with each other.
2986
2987                         unpack_phase.addExitListener(self._unpack_exit)
2988                         self._current_task = unpack_phase
2989                         self.scheduler.scheduleUnpack(unpack_phase)
2990
2991                 else:
2992                         self._start_task(unpack_phase, self._unpack_exit)
2993
2994         def _unpack_exit(self, unpack_phase):
2995
2996                 if self._default_exit(unpack_phase) != os.EX_OK:
2997                         self.wait()
2998                         return
2999
3000                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3001
3002                 pkg = self.pkg
3003                 phases = self._phases
3004                 eapi = pkg.metadata["EAPI"]
3005                 if eapi in ("0", "1"):
3006                         # skip src_prepare and src_configure
3007                         phases = phases[2:]
3008
3009                 for phase in phases:
3010                         ebuild_phases.add(EbuildPhase(background=self.background,
3011                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3012                                 settings=self.settings, tree=self._tree))
3013
3014                 self._start_task(ebuild_phases, self._default_final_exit)
3015
3016 class EbuildMetadataPhase(SubProcess):
3017
3018         """
3019         Asynchronous interface for the ebuild "depend" phase which is
3020         used to extract metadata from the ebuild.
3021         """
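             # Data-flow note (editor's addition): the spawned ebuild writes one
             # metadata value per line to fd 9 (_metadata_fd), and
             # _set_returncode() pairs those lines with portage.auxdbkeys,
             # roughly metadata = izip(portage.auxdbkeys, metadata_lines),
             # before handing the result to metadata_callback().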
3022
3023         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3024                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3025                 ("_raw_metadata",)
3026
3027         _file_names = ("ebuild",)
3028         _files_dict = slot_dict_class(_file_names, prefix="")
3029         _metadata_fd = 9
3030
3031         def _start(self):
3032                 settings = self.settings
3033                 settings.reset()
3034                 ebuild_path = self.ebuild_path
3035                 debug = settings.get("PORTAGE_DEBUG") == "1"
3036                 master_fd = None
3037                 slave_fd = None
3038                 fd_pipes = None
3039                 if self.fd_pipes is not None:
3040                         fd_pipes = self.fd_pipes.copy()
3041                 else:
3042                         fd_pipes = {}
3043
3044                 fd_pipes.setdefault(0, sys.stdin.fileno())
3045                 fd_pipes.setdefault(1, sys.stdout.fileno())
3046                 fd_pipes.setdefault(2, sys.stderr.fileno())
3047
3048                 # flush any pending output
3049                 for fd in fd_pipes.itervalues():
3050                         if fd == sys.stdout.fileno():
3051                                 sys.stdout.flush()
3052                         if fd == sys.stderr.fileno():
3053                                 sys.stderr.flush()
3054
3055                 fd_pipes_orig = fd_pipes.copy()
3056                 self._files = self._files_dict()
3057                 files = self._files
3058
3059                 master_fd, slave_fd = os.pipe()
3060                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3061                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3062
3063                 fd_pipes[self._metadata_fd] = slave_fd
3064
3065                 self._raw_metadata = []
3066                 files.ebuild = os.fdopen(master_fd, 'r')
3067                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3068                         self._registered_events, self._output_handler)
3069                 self._registered = True
3070
3071                 retval = portage.doebuild(ebuild_path, "depend",
3072                         settings["ROOT"], settings, debug,
3073                         mydbapi=self.portdb, tree="porttree",
3074                         fd_pipes=fd_pipes, returnpid=True)
3075
3076                 os.close(slave_fd)
3077
3078                 if isinstance(retval, int):
3079                         # doebuild failed before spawning
3080                         self._unregister()
3081                         self.returncode = retval
3082                         self.wait()
3083                         return
3084
3085                 self.pid = retval[0]
3086                 portage.process.spawned_pids.remove(self.pid)
3087
3088         def _output_handler(self, fd, event):
3089
3090                 if event & PollConstants.POLLIN:
3091                         self._raw_metadata.append(self._files.ebuild.read())
3092                         if not self._raw_metadata[-1]:
3093                                 self._unregister()
3094                                 self.wait()
3095
3096                 self._unregister_if_appropriate(event)
3097                 return self._registered
3098
3099         def _set_returncode(self, wait_retval):
3100                 SubProcess._set_returncode(self, wait_retval)
3101                 if self.returncode == os.EX_OK:
3102                         metadata_lines = "".join(self._raw_metadata).splitlines()
3103                         if len(portage.auxdbkeys) != len(metadata_lines):
3104                                 # Don't trust bash's returncode if the
3105                                 # number of lines is incorrect.
3106                                 self.returncode = 1
3107                         else:
3108                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3109                                 self.metadata_callback(self.cpv, self.ebuild_path,
3110                                         self.repo_path, metadata, self.ebuild_mtime)
3111
3112 class EbuildProcess(SpawnProcess):
3113
3114         __slots__ = ("phase", "pkg", "settings", "tree")
3115
3116         def _start(self):
3117                 # Don't open the log file during the clean phase since the
3118                 # open file can result in an NFS lock on $T/build.log, which
3119                 # prevents the clean phase from removing $T.
3120                 if self.phase not in ("clean", "cleanrm"):
3121                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3122                 SpawnProcess._start(self)
3123
3124         def _pipe(self, fd_pipes):
3125                 stdout_pipe = fd_pipes.get(1)
3126                 got_pty, master_fd, slave_fd = \
3127                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3128                 return (master_fd, slave_fd)
3129
3130         def _spawn(self, args, **kwargs):
3131
3132                 root_config = self.pkg.root_config
3133                 tree = self.tree
3134                 mydbapi = root_config.trees[tree].dbapi
3135                 settings = self.settings
3136                 ebuild_path = settings["EBUILD"]
3137                 debug = settings.get("PORTAGE_DEBUG") == "1"
3138
3139                 rval = portage.doebuild(ebuild_path, self.phase,
3140                         root_config.root, settings, debug,
3141                         mydbapi=mydbapi, tree=tree, **kwargs)
3142
3143                 return rval
3144
3145         def _set_returncode(self, wait_retval):
3146                 SpawnProcess._set_returncode(self, wait_retval)
3147
3148                 if self.phase not in ("clean", "cleanrm"):
3149                         self.returncode = portage._doebuild_exit_status_check_and_log(
3150                                 self.settings, self.phase, self.returncode)
3151
3152                 if self.phase == "test" and self.returncode != os.EX_OK and \
3153                         "test-fail-continue" in self.settings.features:
3154                         self.returncode = os.EX_OK
3155
3156                 portage._post_phase_userpriv_perms(self.settings)
3157
3158 class EbuildPhase(CompositeTask):
3159
3160         __slots__ = ("background", "pkg", "phase",
3161                 "scheduler", "settings", "tree")
3162
3163         _post_phase_cmds = portage._post_phase_cmds
3164
3165         def _start(self):
3166
3167                 ebuild_process = EbuildProcess(background=self.background,
3168                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3169                         settings=self.settings, tree=self.tree)
3170
3171                 self._start_task(ebuild_process, self._ebuild_exit)
3172
3173         def _ebuild_exit(self, ebuild_process):
3174
3175                 if self.phase == "install":
3176                         out = None
3177                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3178                         log_file = None
3179                         if self.background and log_path is not None:
3180                                 log_file = open(log_path, 'a')
3181                                 out = log_file
3182                         try:
3183                                 portage._check_build_log(self.settings, out=out)
3184                         finally:
3185                                 if log_file is not None:
3186                                         log_file.close()
3187
3188                 if self._default_exit(ebuild_process) != os.EX_OK:
3189                         self.wait()
3190                         return
3191
3192                 settings = self.settings
3193
3194                 if self.phase == "install":
3195                         portage._post_src_install_uid_fix(settings)
3196
3197                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3198                 if post_phase_cmds is not None:
3199                         post_phase = MiscFunctionsProcess(background=self.background,
3200                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3201                                 scheduler=self.scheduler, settings=settings)
3202                         self._start_task(post_phase, self._post_phase_exit)
3203                         return
3204
3205                 self.returncode = ebuild_process.returncode
3206                 self._current_task = None
3207                 self.wait()
3208
3209         def _post_phase_exit(self, post_phase):
3210                 if self._final_exit(post_phase) != os.EX_OK:
3211                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3212                                 noiselevel=-1)
3213                 self._current_task = None
3214                 self.wait()
3215                 return
3216
3217 class EbuildBinpkg(EbuildProcess):
3218         """
3219         This assumes that src_install() has successfully completed.
3220         """
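             # Flow note (editor's addition): the "package" phase writes the
             # binary package to a pid-suffixed PORTAGE_BINPKG_TMPFILE under
             # bintree.pkgdir, and _set_returncode() injects that file into the
             # binary tree only when the phase exits with os.EX_OK.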
3221         __slots__ = ("_binpkg_tmpfile",)
3222
3223         def _start(self):
3224                 self.phase = "package"
3225                 self.tree = "porttree"
3226                 pkg = self.pkg
3227                 root_config = pkg.root_config
3228                 portdb = root_config.trees["porttree"].dbapi
3229                 bintree = root_config.trees["bintree"]
3230                 ebuild_path = portdb.findname(self.pkg.cpv)
3231                 settings = self.settings
3232                 debug = settings.get("PORTAGE_DEBUG") == "1"
3233
3234                 bintree.prevent_collision(pkg.cpv)
3235                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3236                         pkg.cpv + ".tbz2." + str(os.getpid()))
3237                 self._binpkg_tmpfile = binpkg_tmpfile
3238                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3239                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3240
3241                 try:
3242                         EbuildProcess._start(self)
3243                 finally:
3244                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3245
3246         def _set_returncode(self, wait_retval):
3247                 EbuildProcess._set_returncode(self, wait_retval)
3248
3249                 pkg = self.pkg
3250                 bintree = pkg.root_config.trees["bintree"]
3251                 binpkg_tmpfile = self._binpkg_tmpfile
3252                 if self.returncode == os.EX_OK:
3253                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3254
3255 class EbuildMerge(SlotObject):
3256
3257         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3258                 "pkg", "pkg_count", "pkg_path", "pretend",
3259                 "scheduler", "settings", "tree", "world_atom")
3260
3261         def execute(self):
3262                 root_config = self.pkg.root_config
3263                 settings = self.settings
3264                 retval = portage.merge(settings["CATEGORY"],
3265                         settings["PF"], settings["D"],
3266                         os.path.join(settings["PORTAGE_BUILDDIR"],
3267                         "build-info"), root_config.root, settings,
3268                         myebuild=settings["EBUILD"],
3269                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3270                         vartree=root_config.trees["vartree"],
3271                         prev_mtimes=self.ldpath_mtimes,
3272                         scheduler=self.scheduler,
3273                         blockers=self.find_blockers)
3274
3275                 if retval == os.EX_OK:
3276                         self.world_atom(self.pkg)
3277                         self._log_success()
3278
3279                 return retval
3280
3281         def _log_success(self):
3282                 pkg = self.pkg
3283                 pkg_count = self.pkg_count
3284                 pkg_path = self.pkg_path
3285                 logger = self.logger
3286                 if "noclean" not in self.settings.features:
3287                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3288                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3289                         logger.log((" === (%s of %s) " + \
3290                                 "Post-Build Cleaning (%s::%s)") % \
3291                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3292                                 short_msg=short_msg)
3293                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3294                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3295
3296 class PackageUninstall(AsynchronousTask):
3297
3298         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3299
3300         def _start(self):
3301                 try:
3302                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3303                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3304                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3305                                 writemsg_level=self._writemsg_level)
3306                 except UninstallFailure, e:
3307                         self.returncode = e.status
3308                 else:
3309                         self.returncode = os.EX_OK
3310                 self.wait()
3311
3312         def _writemsg_level(self, msg, level=0, noiselevel=0):
3313
3314                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3315                 background = self.background
3316
3317                 if log_path is None:
3318                         if not (background and level < logging.WARNING):
3319                                 portage.util.writemsg_level(msg,
3320                                         level=level, noiselevel=noiselevel)
3321                 else:
3322                         if not background:
3323                                 portage.util.writemsg_level(msg,
3324                                         level=level, noiselevel=noiselevel)
3325
3326                         f = open(log_path, 'a')
3327                         try:
3328                                 f.write(msg)
3329                         finally:
3330                                 f.close()
3331
3332 class Binpkg(CompositeTask):
3333
3334         __slots__ = ("find_blockers",
3335                 "ldpath_mtimes", "logger", "opts",
3336                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3337                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3338                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3339
3340         def _writemsg_level(self, msg, level=0, noiselevel=0):
3341
3342                 if not self.background:
3343                         portage.util.writemsg_level(msg,
3344                                 level=level, noiselevel=noiselevel)
3345
3346                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3347                 if log_path is not None:
3348                         f = open(log_path, 'a')
3349                         try:
3350                                 f.write(msg)
3351                         finally:
3352                                 f.close()
3353
3354         def _start(self):
3355
3356                 pkg = self.pkg
3357                 settings = self.settings
3358                 settings.setcpv(pkg)
3359                 self._tree = "bintree"
3360                 self._bintree = self.pkg.root_config.trees[self._tree]
3361                 self._verify = not self.opts.pretend
3362
3363                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3364                         "portage", pkg.category, pkg.pf)
3365                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3366                         pkg=pkg, settings=settings)
3367                 self._image_dir = os.path.join(dir_path, "image")
3368                 self._infloc = os.path.join(dir_path, "build-info")
3369                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3370                 settings["EBUILD"] = self._ebuild_path
3371                 debug = settings.get("PORTAGE_DEBUG") == "1"
3372                 portage.doebuild_environment(self._ebuild_path, "setup",
3373                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3374                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3375
3376                 # The prefetcher has already completed or it
3377                 # could be running now. If it's running now,
3378                 # wait for it to complete since it holds
3379                 # a lock on the file being fetched. The
3380                 # portage.locks functions are only designed
3381                 # to work between separate processes. Since
3382                 # the lock is held by the current process,
3383                 # use the scheduler and fetcher methods to
3384                 # synchronize with the fetcher.
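                     # Summary of the branches below (editor's note): a prefetcher
                     # that is no longer alive is cancelled to release its
                     # resources, while poll() returning None means the fetch is
                     # still running, so this task registers _prefetch_exit as an
                     # exit listener rather than blocking on the lock it holds.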
3385                 prefetcher = self.prefetcher
3386                 if prefetcher is None:
3387                         pass
3388                 elif not prefetcher.isAlive():
3389                         prefetcher.cancel()
3390                 elif prefetcher.poll() is None:
3391
3392                         waiting_msg = ("Fetching '%s' " + \
3393                                 "in the background. " + \
3394                                 "To view fetch progress, run `tail -f " + \
3395                                 "/var/log/emerge-fetch.log` in another " + \
3396                                 "terminal.") % prefetcher.pkg_path
3397                         msg_prefix = colorize("GOOD", " * ")
3398                         from textwrap import wrap
3399                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3400                                 for line in wrap(waiting_msg, 65))
3401                         if not self.background:
3402                                 writemsg(waiting_msg, noiselevel=-1)
3403
3404                         self._current_task = prefetcher
3405                         prefetcher.addExitListener(self._prefetch_exit)
3406                         return
3407
3408                 self._prefetch_exit(prefetcher)
3409
3410         def _prefetch_exit(self, prefetcher):
3411
3412                 pkg = self.pkg
3413                 pkg_count = self.pkg_count
3414                 if not (self.opts.pretend or self.opts.fetchonly):
3415                         self._build_dir.lock()
3416                         # If necessary, discard old log so that we don't
3417                         # append to it.
3418                         for x in ('.logid', 'temp/build.log'):
3419                                 try:
3420                                         os.unlink(os.path.join(self._build_dir.dir_path, x))
3421                                 except OSError:
3422                                         pass
3423                         # Initialize PORTAGE_LOG_FILE.
3424                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3425                 fetcher = BinpkgFetcher(background=self.background,
3426                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3427                         pretend=self.opts.pretend, scheduler=self.scheduler)
3428                 pkg_path = fetcher.pkg_path
3429                 self._pkg_path = pkg_path
3430
3431                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3432
3433                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3434                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3435                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3436                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3437                         self.logger.log(msg, short_msg=short_msg)
3438                         self._start_task(fetcher, self._fetcher_exit)
3439                         return
3440
3441                 self._fetcher_exit(fetcher)
3442
3443         def _fetcher_exit(self, fetcher):
3444
3445                 # The fetcher only has a returncode when
3446                 # --getbinpkg is enabled.
3447                 if fetcher.returncode is not None:
3448                         self._fetched_pkg = True
3449                         if self._default_exit(fetcher) != os.EX_OK:
3450                                 self._unlock_builddir()
3451                                 self.wait()
3452                                 return
3453
3454                 if self.opts.pretend:
3455                         self._current_task = None
3456                         self.returncode = os.EX_OK
3457                         self.wait()
3458                         return
3459
3460                 verifier = None
3461                 if self._verify:
3462                         logfile = None
3463                         if self.background:
3464                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3465                         verifier = BinpkgVerifier(background=self.background,
3466                                 logfile=logfile, pkg=self.pkg)
3467                         self._start_task(verifier, self._verifier_exit)
3468                         return
3469
3470                 self._verifier_exit(verifier)
3471
3472         def _verifier_exit(self, verifier):
3473                 if verifier is not None and \
3474                         self._default_exit(verifier) != os.EX_OK:
3475                         self._unlock_builddir()
3476                         self.wait()
3477                         return
3478
3479                 logger = self.logger
3480                 pkg = self.pkg
3481                 pkg_count = self.pkg_count
3482                 pkg_path = self._pkg_path
3483
3484                 if self._fetched_pkg:
3485                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3486
3487                 if self.opts.fetchonly:
3488                         self._current_task = None
3489                         self.returncode = os.EX_OK
3490                         self.wait()
3491                         return
3492
3493                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3494                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3495                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3496                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3497                 logger.log(msg, short_msg=short_msg)
3498
3499                 phase = "clean"
3500                 settings = self.settings
3501                 ebuild_phase = EbuildPhase(background=self.background,
3502                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3503                         settings=settings, tree=self._tree)
3504
3505                 self._start_task(ebuild_phase, self._clean_exit)
3506
3507         def _clean_exit(self, clean_phase):
3508                 if self._default_exit(clean_phase) != os.EX_OK:
3509                         self._unlock_builddir()
3510                         self.wait()
3511                         return
3512
3513                 dir_path = self._build_dir.dir_path
3514
3515                 infloc = self._infloc
3516                 pkg = self.pkg
3517                 pkg_path = self._pkg_path
3518
3519                 dir_mode = 0755
3520                 for mydir in (dir_path, self._image_dir, infloc):
3521                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3522                                 gid=portage.data.portage_gid, mode=dir_mode)
3523
3524                 # This initializes PORTAGE_LOG_FILE.
3525                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3526                 self._writemsg_level(">>> Extracting info\n")
3527
3528                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3529                 check_missing_metadata = ("CATEGORY", "PF")
3530                 missing_metadata = set()
3531                 for k in check_missing_metadata:
3532                         v = pkg_xpak.getfile(k)
3533                         if not v:
3534                                 missing_metadata.add(k)
3535
3536                 pkg_xpak.unpackinfo(infloc)
3537                 for k in missing_metadata:
3538                         if k == "CATEGORY":
3539                                 v = pkg.category
3540                         elif k == "PF":
3541                                 v = pkg.pf
3542                         else:
3543                                 continue
3544
3545                         f = open(os.path.join(infloc, k), 'wb')
3546                         try:
3547                                 f.write(v + "\n")
3548                         finally:
3549                                 f.close()
3550
3551                 # Store the md5sum in the vdb.
3552                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3553                 try:
3554                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3555                 finally:
3556                         f.close()
3557
3558                 # This gives bashrc users an opportunity to do various things
3559                 # such as remove binary packages after they're installed.
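                     # Hook example (editor's addition, hypothetical): an
                     # /etc/portage/bashrc snippet could test ${PORTAGE_BINPKG_FILE}
                     # and, for instance, remove the fetched .tbz2 once it is no
                     # longer needed.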
3560                 settings = self.settings
3561                 settings.setcpv(self.pkg)
3562                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3563                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3564
3565                 phase = "setup"
3566                 setup_phase = EbuildPhase(background=self.background,
3567                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3568                         settings=settings, tree=self._tree)
3569
3570                 setup_phase.addExitListener(self._setup_exit)
3571                 self._current_task = setup_phase
3572                 self.scheduler.scheduleSetup(setup_phase)
3573
3574         def _setup_exit(self, setup_phase):
3575                 if self._default_exit(setup_phase) != os.EX_OK:
3576                         self._unlock_builddir()
3577                         self.wait()
3578                         return
3579
3580                 extractor = BinpkgExtractorAsync(background=self.background,
3581                         image_dir=self._image_dir,
3582                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3583                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3584                 self._start_task(extractor, self._extractor_exit)
3585
3586         def _extractor_exit(self, extractor):
3587                 if self._final_exit(extractor) != os.EX_OK:
3588                         self._unlock_builddir()
3589                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3590                                 noiselevel=-1)
3591                 self.wait()
3592
3593         def _unlock_builddir(self):
3594                 if self.opts.pretend or self.opts.fetchonly:
3595                         return
3596                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3597                 self._build_dir.unlock()
3598
3599         def install(self):
3600
3601                 # This gives bashrc users an opportunity to do various things
3602                 # such as remove binary packages after they're installed.
3603                 settings = self.settings
3604                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3605                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3606
3607                 merge = EbuildMerge(find_blockers=self.find_blockers,
3608                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3609                         pkg=self.pkg, pkg_count=self.pkg_count,
3610                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3611                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3612
3613                 try:
3614                         retval = merge.execute()
3615                 finally:
3616                         settings.pop("PORTAGE_BINPKG_FILE", None)
3617                         self._unlock_builddir()
3618                 return retval
3619
3620 class BinpkgFetcher(SpawnProcess):
3621
3622         __slots__ = ("pkg", "pretend",
3623                 "locked", "pkg_path", "_lock_obj")
3624
3625         def __init__(self, **kwargs):
3626                 SpawnProcess.__init__(self, **kwargs)
3627                 pkg = self.pkg
3628                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3629
3630         def _start(self):
3631
3632                 if self.cancelled:
3633                         return
3634
3635                 pkg = self.pkg
3636                 pretend = self.pretend
3637                 bintree = pkg.root_config.trees["bintree"]
3638                 settings = bintree.settings
3639                 use_locks = "distlocks" in settings.features
3640                 pkg_path = self.pkg_path
3641
3642                 if not pretend:
3643                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3644                         if use_locks:
3645                                 self.lock()
3646                 exists = os.path.exists(pkg_path)
3647                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3648                 if not (pretend or resume):
3649                         # Remove existing file or broken symlink.
3650                         try:
3651                                 os.unlink(pkg_path)
3652                         except OSError:
3653                                 pass
3654
3655                 # urljoin doesn't work correctly with
3656                 # unrecognized protocols like sftp
3657                 if bintree._remote_has_index:
3658                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3659                         if not rel_uri:
3660                                 rel_uri = pkg.cpv + ".tbz2"
3661                         uri = bintree._remote_base_uri.rstrip("/") + \
3662                                 "/" + rel_uri.lstrip("/")
3663                 else:
3664                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3665                                 "/" + pkg.pf + ".tbz2"
3666
3667                 if pretend:
3668                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3669                         self.returncode = os.EX_OK
3670                         self.wait()
3671                         return
3672
3673                 protocol = urlparse.urlparse(uri)[0]
3674                 fcmd_prefix = "FETCHCOMMAND"
3675                 if resume:
3676                         fcmd_prefix = "RESUMECOMMAND"
3677                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3678                 if not fcmd:
3679                         fcmd = settings.get(fcmd_prefix)
3680
3681                 fcmd_vars = {
3682                         "DISTDIR" : os.path.dirname(pkg_path),
3683                         "URI"     : uri,
3684                         "FILE"    : os.path.basename(pkg_path)
3685                 }
3686
3687                 fetch_env = dict(settings.iteritems())
3688                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3689                         for x in shlex.split(fcmd)]
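                     # Illustrative sketch (not executed here): with a typical wget-based
                     # setting such as
                     #   FETCHCOMMAND='wget -t 3 -T 60 -O "${DISTDIR}/${FILE}" "${URI}"'
                     # (the exact default varies by profile), shlex.split() plus
                     # varexpand() with the fcmd_vars above would yield roughly:
                     #   ['wget', '-t', '3', '-T', '60', '-O',
                     #    '/usr/portage/packages/All/foo-1.0.tbz2',
                     #    'http://binhost.example.org/All/foo-1.0.tbz2']
                     # where the packages path and URI shown are hypothetical examples.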
3690
3691                 if self.fd_pipes is None:
3692                         self.fd_pipes = {}
3693                 fd_pipes = self.fd_pipes
3694
3695                 # Redirect all output to stdout since some fetchers like
3696                 # wget pollute stderr (if portage detects a problem then it
3697                 # can send its own message to stderr).
3698                 fd_pipes.setdefault(0, sys.stdin.fileno())
3699                 fd_pipes.setdefault(1, sys.stdout.fileno())
3700                 fd_pipes.setdefault(2, sys.stdout.fileno())
3701
3702                 self.args = fetch_args
3703                 self.env = fetch_env
3704                 SpawnProcess._start(self)
3705
3706         def _set_returncode(self, wait_retval):
3707                 SpawnProcess._set_returncode(self, wait_retval)
3708                 if self.returncode == os.EX_OK:
3709                         # If possible, update the mtime to match the remote package if
3710                         # the fetcher didn't already do it automatically.
3711                         bintree = self.pkg.root_config.trees["bintree"]
3712                         if bintree._remote_has_index:
3713                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3714                                 if remote_mtime is not None:
3715                                         try:
3716                                                 remote_mtime = long(remote_mtime)
3717                                         except ValueError:
3718                                                 pass
3719                                         else:
3720                                                 try:
3721                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3722                                                 except OSError:
3723                                                         pass
3724                                                 else:
3725                                                         if remote_mtime != local_mtime:
3726                                                                 try:
3727                                                                         os.utime(self.pkg_path,
3728                                                                                 (remote_mtime, remote_mtime))
3729                                                                 except OSError:
3730                                                                         pass
3731
3732                 if self.locked:
3733                         self.unlock()
3734
3735         def lock(self):
3736                 """
3737                 This raises an AlreadyLocked exception if lock() is called
3738                 while a lock is already held. In order to avoid this, call
3739                 unlock() or check whether the "locked" attribute is True
3740                 or False before calling lock().
3741                 """
3742                 if self._lock_obj is not None:
3743                         raise self.AlreadyLocked((self._lock_obj,))
3744
3745                 self._lock_obj = portage.locks.lockfile(
3746                         self.pkg_path, wantnewlockfile=1)
3747                 self.locked = True
3748
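             # Illustrative usage sketch (assumes a Package instance `pkg` and a
             # PollScheduler-style `scheduler` are available; not executed as part
             # of this module):
             #
             #   fetcher = BinpkgFetcher(background=False, pkg=pkg,
             #           scheduler=scheduler)
             #   if not fetcher.locked:
             #           fetcher.lock()
             #   try:
             #           fetcher.start()
             #           fetcher.wait()
             #   finally:
             #           fetcher.unlock()
             #
             # Checking the "locked" attribute first avoids the AlreadyLocked
             # exception described in lock() above.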
3749         class AlreadyLocked(portage.exception.PortageException):
3750                 pass
3751
3752         def unlock(self):
3753                 if self._lock_obj is None:
3754                         return
3755                 portage.locks.unlockfile(self._lock_obj)
3756                 self._lock_obj = None
3757                 self.locked = False
3758
3759 class BinpkgVerifier(AsynchronousTask):
3760         __slots__ = ("logfile", "pkg",)
3761
3762         def _start(self):
3763                 """
3764                 Note: Unlike a normal AsynchronousTask.start() method,
3765                 this one does all of its work synchronously. The returncode
3766                 attribute will be set before it returns.
3767                 """
3768
3769                 pkg = self.pkg
3770                 root_config = pkg.root_config
3771                 bintree = root_config.trees["bintree"]
3772                 rval = os.EX_OK
3773                 stdout_orig = sys.stdout
3774                 stderr_orig = sys.stderr
3775                 log_file = None
3776                 if self.background and self.logfile is not None:
3777                         log_file = open(self.logfile, 'a')
3778                 try:
3779                         if log_file is not None:
3780                                 sys.stdout = log_file
3781                                 sys.stderr = log_file
3782                         try:
3783                                 bintree.digestCheck(pkg)
3784                         except portage.exception.FileNotFound:
3785                                 writemsg("!!! Fetching Binary failed " + \
3786                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3787                                 rval = 1
3788                         except portage.exception.DigestException, e:
3789                                 writemsg("\n!!! Digest verification failed:\n",
3790                                         noiselevel=-1)
3791                                 writemsg("!!! %s\n" % e.value[0],
3792                                         noiselevel=-1)
3793                                 writemsg("!!! Reason: %s\n" % e.value[1],
3794                                         noiselevel=-1)
3795                                 writemsg("!!! Got: %s\n" % e.value[2],
3796                                         noiselevel=-1)
3797                                 writemsg("!!! Expected: %s\n" % e.value[3],
3798                                         noiselevel=-1)
3799                                 rval = 1
3800                         if rval != os.EX_OK:
3801                                 pkg_path = bintree.getname(pkg.cpv)
3802                                 head, tail = os.path.split(pkg_path)
3803                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3804                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3805                                         noiselevel=-1)
3806                 finally:
3807                         sys.stdout = stdout_orig
3808                         sys.stderr = stderr_orig
3809                         if log_file is not None:
3810                                 log_file.close()
3811
3812                 self.returncode = rval
3813                 self.wait()
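             # Because _start() does its work synchronously, callers can inspect
             # the result immediately after start(). A minimal sketch (assuming a
             # binary Package instance `pkg`; not executed as part of this module):
             #
             #   verifier = BinpkgVerifier(background=False, logfile=None, pkg=pkg)
             #   verifier.start()
             #   if verifier.returncode != os.EX_OK:
             #           pass  # digest check failed; the binpkg was renamed aside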
3814
3815 class BinpkgPrefetcher(CompositeTask):
3816
3817         __slots__ = ("pkg",) + \
3818                 ("pkg_path", "_bintree",)
3819
3820         def _start(self):
3821                 self._bintree = self.pkg.root_config.trees["bintree"]
3822                 fetcher = BinpkgFetcher(background=self.background,
3823                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3824                         scheduler=self.scheduler)
3825                 self.pkg_path = fetcher.pkg_path
3826                 self._start_task(fetcher, self._fetcher_exit)
3827
3828         def _fetcher_exit(self, fetcher):
3829
3830                 if self._default_exit(fetcher) != os.EX_OK:
3831                         self.wait()
3832                         return
3833
3834                 verifier = BinpkgVerifier(background=self.background,
3835                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3836                 self._start_task(verifier, self._verifier_exit)
3837
3838         def _verifier_exit(self, verifier):
3839                 if self._default_exit(verifier) != os.EX_OK:
3840                         self.wait()
3841                         return
3842
3843                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3844
3845                 self._current_task = None
3846                 self.returncode = os.EX_OK
3847                 self.wait()
3848
3849 class BinpkgExtractorAsync(SpawnProcess):
3850
3851         __slots__ = ("image_dir", "pkg", "pkg_path")
3852
3853         _shell_binary = portage.const.BASH_BINARY
3854
3855         def _start(self):
3856                 self.args = [self._shell_binary, "-c",
3857                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3858                         (portage._shell_quote(self.pkg_path),
3859                         portage._shell_quote(self.image_dir))]
3860
3861                 self.env = self.pkg.root_config.settings.environ()
3862                 SpawnProcess._start(self)
3863
3864 class MergeListItem(CompositeTask):
3865
3866         """
3867         TODO: For parallel scheduling, everything here needs asynchronous
3868         execution support (start, poll, and wait methods).
3869         """
3870
3871         __slots__ = ("args_set",
3872                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3873                 "find_blockers", "logger", "mtimedb", "pkg",
3874                 "pkg_count", "pkg_to_replace", "prefetcher",
3875                 "settings", "statusMessage", "world_atom") + \
3876                 ("_install_task",)
3877
3878         def _start(self):
3879
3880                 pkg = self.pkg
3881                 build_opts = self.build_opts
3882
3883                 if pkg.installed:
3884                         # uninstall is executed by self.merge()
3885                         self.returncode = os.EX_OK
3886                         self.wait()
3887                         return
3888
3889                 args_set = self.args_set
3890                 find_blockers = self.find_blockers
3891                 logger = self.logger
3892                 mtimedb = self.mtimedb
3893                 pkg_count = self.pkg_count
3894                 scheduler = self.scheduler
3895                 settings = self.settings
3896                 world_atom = self.world_atom
3897                 ldpath_mtimes = mtimedb["ldpath"]
3898
3899                 action_desc = "Emerging"
3900                 preposition = "for"
3901                 if pkg.type_name == "binary":
3902                         action_desc += " binary"
3903
3904                 if build_opts.fetchonly:
3905                         action_desc = "Fetching"
3906
3907                 msg = "%s (%s of %s) %s" % \
3908                         (action_desc,
3909                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3910                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3911                         colorize("GOOD", pkg.cpv))
3912
3913                 portdb = pkg.root_config.trees["porttree"].dbapi
3914                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3915                 if portdir_repo_name:
3916                         pkg_repo_name = pkg.metadata.get("repository")
3917                         if pkg_repo_name != portdir_repo_name:
3918                                 if not pkg_repo_name:
3919                                         pkg_repo_name = "unknown repo"
3920                                 msg += " from %s" % pkg_repo_name
3921
3922                 if pkg.root != "/":
3923                         msg += " %s %s" % (preposition, pkg.root)
3924
3925                 if not build_opts.pretend:
3926                         self.statusMessage(msg)
3927                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3928                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3929
3930                 if pkg.type_name == "ebuild":
3931
3932                         build = EbuildBuild(args_set=args_set,
3933                                 background=self.background,
3934                                 config_pool=self.config_pool,
3935                                 find_blockers=find_blockers,
3936                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3937                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3938                                 prefetcher=self.prefetcher, scheduler=scheduler,
3939                                 settings=settings, world_atom=world_atom)
3940
3941                         self._install_task = build
3942                         self._start_task(build, self._default_final_exit)
3943                         return
3944
3945                 elif pkg.type_name == "binary":
3946
3947                         binpkg = Binpkg(background=self.background,
3948                                 find_blockers=find_blockers,
3949                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3950                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3951                                 prefetcher=self.prefetcher, settings=settings,
3952                                 scheduler=scheduler, world_atom=world_atom)
3953
3954                         self._install_task = binpkg
3955                         self._start_task(binpkg, self._default_final_exit)
3956                         return
3957
3958         def _poll(self):
3959                 self._install_task.poll()
3960                 return self.returncode
3961
3962         def _wait(self):
3963                 self._install_task.wait()
3964                 return self.returncode
3965
3966         def merge(self):
3967
3968                 pkg = self.pkg
3969                 build_opts = self.build_opts
3970                 find_blockers = self.find_blockers
3971                 logger = self.logger
3972                 mtimedb = self.mtimedb
3973                 pkg_count = self.pkg_count
3974                 prefetcher = self.prefetcher
3975                 scheduler = self.scheduler
3976                 settings = self.settings
3977                 world_atom = self.world_atom
3978                 ldpath_mtimes = mtimedb["ldpath"]
3979
3980                 if pkg.installed:
3981                         if not (build_opts.buildpkgonly or \
3982                                 build_opts.fetchonly or build_opts.pretend):
3983
3984                                 uninstall = PackageUninstall(background=self.background,
3985                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3986                                         pkg=pkg, scheduler=scheduler, settings=settings)
3987
3988                                 uninstall.start()
3989                                 retval = uninstall.wait()
3990                                 if retval != os.EX_OK:
3991                                         return retval
3992                         return os.EX_OK
3993
3994                 if build_opts.fetchonly or \
3995                         build_opts.buildpkgonly:
3996                         return self.returncode
3997
3998                 retval = self._install_task.install()
3999                 return retval
4000
4001 class PackageMerge(AsynchronousTask):
4002         """
4003         TODO: Implement asynchronous merge so that the scheduler can
4004         run while a merge is executing.
4005         """
4006
4007         __slots__ = ("merge",)
4008
4009         def _start(self):
4010
4011                 pkg = self.merge.pkg
4012                 pkg_count = self.merge.pkg_count
4013
4014                 if pkg.installed:
4015                         action_desc = "Uninstalling"
4016                         preposition = "from"
4017                 else:
4018                         action_desc = "Installing"
4019                         preposition = "to"
4020
4021                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4022
4023                 if pkg.root != "/":
4024                         msg += " %s %s" % (preposition, pkg.root)
4025
4026                 if not self.merge.build_opts.fetchonly and \
4027                         not self.merge.build_opts.pretend and \
4028                         not self.merge.build_opts.buildpkgonly:
4029                         self.merge.statusMessage(msg)
4030
4031                 self.returncode = self.merge.merge()
4032                 self.wait()
4033
4034 class DependencyArg(object):
4035         def __init__(self, arg=None, root_config=None):
4036                 self.arg = arg
4037                 self.root_config = root_config
4038
4039         def __str__(self):
4040                 return str(self.arg)
4041
4042 class AtomArg(DependencyArg):
4043         def __init__(self, atom=None, **kwargs):
4044                 DependencyArg.__init__(self, **kwargs)
4045                 self.atom = atom
4046                 if not isinstance(self.atom, portage.dep.Atom):
4047                         self.atom = portage.dep.Atom(self.atom)
4048                 self.set = (self.atom, )
4049
4050 class PackageArg(DependencyArg):
4051         def __init__(self, package=None, **kwargs):
4052                 DependencyArg.__init__(self, **kwargs)
4053                 self.package = package
4054                 self.atom = portage.dep.Atom("=" + package.cpv)
4055                 self.set = (self.atom, )
4056
4057 class SetArg(DependencyArg):
4058         def __init__(self, set=None, **kwargs):
4059                 DependencyArg.__init__(self, **kwargs)
4060                 self.set = set
4061                 self.name = self.arg[len(SETPREFIX):]
4062
4063 class Dependency(SlotObject):
4064         __slots__ = ("atom", "blocker", "depth",
4065                 "parent", "onlydeps", "priority", "root")
4066         def __init__(self, **kwargs):
4067                 SlotObject.__init__(self, **kwargs)
4068                 if self.priority is None:
4069                         self.priority = DepPriority()
4070                 if self.depth is None:
4071                         self.depth = 0
4072
4073 class BlockerCache(portage.cache.mappings.MutableMapping):
4074         """This caches blockers of installed packages so that dep_check does not
4075         have to be done for every single installed package on every invocation of
4076         emerge.  The cache is invalidated whenever it is detected that something
4077         has changed that might alter the results of dep_check() calls:
4078                 1) the set of installed packages (including COUNTER) has changed
4079                 2) the old-style virtuals have changed
4080         """
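             # Illustrative usage sketch (assumes a root path `myroot`, an
             # installed-package dbapi `vardb`, and per-package cpv/counter/atoms
             # values are available; not executed as part of this module):
             #
             #   blocker_cache = BlockerCache(myroot, vardb)
             #   cached = blocker_cache.get(cpv)
             #   if cached is None or cached.counter != counter:
             #           blocker_cache[cpv] = blocker_cache.BlockerData(counter, atoms)
             #   blocker_cache.flush()
             #
             # BlockerDB.findInstalledBlockers() below uses the cache this way.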
4081
4082         # Number of uncached packages to trigger cache update, since
4083         # it's wasteful to update it for every vdb change.
4084         _cache_threshold = 5
4085
4086         class BlockerData(object):
4087
4088                 __slots__ = ("__weakref__", "atoms", "counter")
4089
4090                 def __init__(self, counter, atoms):
4091                         self.counter = counter
4092                         self.atoms = atoms
4093
4094         def __init__(self, myroot, vardb):
4095                 self._vardb = vardb
4096                 self._virtuals = vardb.settings.getvirtuals()
4097                 self._cache_filename = os.path.join(myroot,
4098                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4099                 self._cache_version = "1"
4100                 self._cache_data = None
4101                 self._modified = set()
4102                 self._load()
4103
4104         def _load(self):
4105                 try:
4106                         f = open(self._cache_filename, mode='rb')
4107                         mypickle = pickle.Unpickler(f)
4108                         try:
4109                                 mypickle.find_global = None
4110                         except AttributeError:
4111                                 # TODO: If py3k, override Unpickler.find_class().
4112                                 pass
4113                         self._cache_data = mypickle.load()
4114                         f.close()
4115                         del f
4116                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4117                         if isinstance(e, pickle.UnpicklingError):
4118                                 writemsg("!!! Error loading '%s': %s\n" % \
4119                                         (self._cache_filename, str(e)), noiselevel=-1)
4120                         del e
4121
4122                 cache_valid = self._cache_data and \
4123                         isinstance(self._cache_data, dict) and \
4124                         self._cache_data.get("version") == self._cache_version and \
4125                         isinstance(self._cache_data.get("blockers"), dict)
4126                 if cache_valid:
4127                         # Validate all the atoms and counters so that
4128                         # corruption is detected as soon as possible.
4129                         invalid_items = set()
4130                         for k, v in self._cache_data["blockers"].iteritems():
4131                                 if not isinstance(k, basestring):
4132                                         invalid_items.add(k)
4133                                         continue
4134                                 try:
4135                                         if portage.catpkgsplit(k) is None:
4136                                                 invalid_items.add(k)
4137                                                 continue
4138                                 except portage.exception.InvalidData:
4139                                         invalid_items.add(k)
4140                                         continue
4141                                 if not isinstance(v, tuple) or \
4142                                         len(v) != 2:
4143                                         invalid_items.add(k)
4144                                         continue
4145                                 counter, atoms = v
4146                                 if not isinstance(counter, (int, long)):
4147                                         invalid_items.add(k)
4148                                         continue
4149                                 if not isinstance(atoms, (list, tuple)):
4150                                         invalid_items.add(k)
4151                                         continue
4152                                 invalid_atom = False
4153                                 for atom in atoms:
4154                                         if not isinstance(atom, basestring):
4155                                                 invalid_atom = True
4156                                                 break
4157                                         if atom[:1] != "!" or \
4158                                                 not portage.isvalidatom(
4159                                                 atom, allow_blockers=True):
4160                                                 invalid_atom = True
4161                                                 break
4162                                 if invalid_atom:
4163                                         invalid_items.add(k)
4164                                         continue
4165
4166                         for k in invalid_items:
4167                                 del self._cache_data["blockers"][k]
4168                         if not self._cache_data["blockers"]:
4169                                 cache_valid = False
4170
4171                 if not cache_valid:
4172                         self._cache_data = {"version":self._cache_version}
4173                         self._cache_data["blockers"] = {}
4174                         self._cache_data["virtuals"] = self._virtuals
4175                 self._modified.clear()
4176
4177         def flush(self):
4178                 """If the current user has permission and the internal blocker cache
4179                 has been updated, save it to disk and mark it unmodified.  This is called
4180                 by emerge after it has processed blockers for all installed packages.
4181                 Currently, the cache is only written if the user has superuser
4182                 privileges (since that's required to obtain a lock), but all users
4183                 have read access and benefit from faster blocker lookups (as long as
4184                 the entire cache is still valid).  The cache is stored as a pickled
4185                 dict object with the following format:
4186
4187                 {
4188                         "version" : "1",
4189                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4190                         "virtuals" : vardb.settings.getvirtuals()
4191                 }
4192                 """
4193                 if len(self._modified) >= self._cache_threshold and \
4194                         secpass >= 2:
4195                         try:
4196                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4197                                 pickle.dump(self._cache_data, f, protocol=2)
4198                                 f.close()
4199                                 portage.util.apply_secpass_permissions(
4200                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4201                         except (IOError, OSError), e:
4202                                 pass
4203                         self._modified.clear()
4204
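             # Illustrative sketch of inspecting the on-disk cache that flush()
             # writes (assumes ROOT is "/" and the file is readable; not executed
             # as part of this module):
             #
             #   cache_file = os.path.join("/",
             #           portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
             #   cache_data = pickle.load(open(cache_file, "rb"))
             #   print cache_data["version"], len(cache_data["blockers"])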
4205         def __setitem__(self, cpv, blocker_data):
4206                 """
4207                 Update the cache and mark it as modified for a future call to
4208                 self.flush().
4209
4210                 @param cpv: Package for which to cache blockers.
4211                 @type cpv: String
4212                 @param blocker_data: An object with counter and atoms attributes.
4213                 @type blocker_data: BlockerData
4214                 """
4215                 self._cache_data["blockers"][cpv] = \
4216                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4217                 self._modified.add(cpv)
4218
4219         def __iter__(self):
4220                 if self._cache_data is None:
4221                         # triggered by python-trace
4222                         return iter([])
4223                 return iter(self._cache_data["blockers"])
4224
4225         def __delitem__(self, cpv):
4226                 del self._cache_data["blockers"][cpv]
4227
4228         def __getitem__(self, cpv):
4229                 """
4230                 @rtype: BlockerData
4231                 @returns: An object with counter and atoms attributes.
4232                 """
4233                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4234
4235 class BlockerDB(object):
4236
4237         def __init__(self, root_config):
4238                 self._root_config = root_config
4239                 self._vartree = root_config.trees["vartree"]
4240                 self._portdb = root_config.trees["porttree"].dbapi
4241
4242                 self._dep_check_trees = None
4243                 self._fake_vartree = None
4244
4245         def _get_fake_vartree(self, acquire_lock=0):
4246                 fake_vartree = self._fake_vartree
4247                 if fake_vartree is None:
4248                         fake_vartree = FakeVartree(self._root_config,
4249                                 acquire_lock=acquire_lock)
4250                         self._fake_vartree = fake_vartree
4251                         self._dep_check_trees = { self._vartree.root : {
4252                                 "porttree"    :  fake_vartree,
4253                                 "vartree"     :  fake_vartree,
4254                         }}
4255                 else:
4256                         fake_vartree.sync(acquire_lock=acquire_lock)
4257                 return fake_vartree
4258
4259         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4260                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4261                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4262                 settings = self._vartree.settings
4263                 stale_cache = set(blocker_cache)
4264                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4265                 dep_check_trees = self._dep_check_trees
4266                 vardb = fake_vartree.dbapi
4267                 installed_pkgs = list(vardb)
4268
4269                 for inst_pkg in installed_pkgs:
4270                         stale_cache.discard(inst_pkg.cpv)
4271                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4272                         if cached_blockers is not None and \
4273                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4274                                 cached_blockers = None
4275                         if cached_blockers is not None:
4276                                 blocker_atoms = cached_blockers.atoms
4277                         else:
4278                                 # Use aux_get() to trigger FakeVartree global
4279                                 # updates on *DEPEND when appropriate.
4280                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4281                                 try:
4282                                         portage.dep._dep_check_strict = False
4283                                         success, atoms = portage.dep_check(depstr,
4284                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4285                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4286                                 finally:
4287                                         portage.dep._dep_check_strict = True
4288                                 if not success:
4289                                         pkg_location = os.path.join(inst_pkg.root,
4290                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4291                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4292                                                 (pkg_location, atoms), noiselevel=-1)
4293                                         continue
4294
4295                                 blocker_atoms = [atom for atom in atoms \
4296                                         if atom.startswith("!")]
4297                                 blocker_atoms.sort()
4298                                 counter = long(inst_pkg.metadata["COUNTER"])
4299                                 blocker_cache[inst_pkg.cpv] = \
4300                                         blocker_cache.BlockerData(counter, blocker_atoms)
4301                 for cpv in stale_cache:
4302                         del blocker_cache[cpv]
4303                 blocker_cache.flush()
4304
4305                 blocker_parents = digraph()
4306                 blocker_atoms = []
4307                 for pkg in installed_pkgs:
4308                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4309                                 blocker_atom = blocker_atom.lstrip("!")
4310                                 blocker_atoms.append(blocker_atom)
4311                                 blocker_parents.add(blocker_atom, pkg)
4312
4313                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4314                 blocking_pkgs = set()
4315                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4316                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4317
4318                 # Check for blockers in the other direction.
4319                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4320                 try:
4321                         portage.dep._dep_check_strict = False
4322                         success, atoms = portage.dep_check(depstr,
4323                                 vardb, settings, myuse=new_pkg.use.enabled,
4324                                 trees=dep_check_trees, myroot=new_pkg.root)
4325                 finally:
4326                         portage.dep._dep_check_strict = True
4327                 if not success:
4328                         # We should never get this far with invalid deps.
4329                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4330                         assert False
4331
4332                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4333                         if atom[:1] == "!"]
4334                 if blocker_atoms:
4335                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4336                         for inst_pkg in installed_pkgs:
4337                                 try:
4338                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4339                                 except (portage.exception.InvalidDependString, StopIteration):
4340                                         continue
4341                                 blocking_pkgs.add(inst_pkg)
4342
4343                 return blocking_pkgs
4344
4345 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4346
4347         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4348                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4349         p_type, p_root, p_key, p_status = parent_node
4350         msg = []
4351         if p_status == "nomerge":
4352                 category, pf = portage.catsplit(p_key)
4353                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4354                 msg.append("Portage is unable to process the dependencies of the ")
4355                 msg.append("'%s' package. " % p_key)
4356                 msg.append("In order to correct this problem, the package ")
4357                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4358                 msg.append("As a temporary workaround, the --nodeps option can ")
4359                 msg.append("be used to ignore all dependencies.  For reference, ")
4360                 msg.append("the problematic dependencies can be found in the ")
4361                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4362         else:
4363                 msg.append("This package can not be installed. ")
4364                 msg.append("Please notify the '%s' package maintainer " % p_key)
4365                 msg.append("about this problem.")
4366
4367         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4368         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4369
4370 class PackageVirtualDbapi(portage.dbapi):
4371         """
4372         A dbapi-like interface class that represents the state of the installed
4373         package database as new packages are installed, replacing any packages
4374         that previously existed in the same slot. The main difference between
4375         this class and fakedbapi is that this one uses Package instances
4376         internally (passed in via cpv_inject() and cpv_remove() calls).
4377         """
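             # Illustrative usage sketch (assumes `settings` and Package instances
             # `pkg` and `other_pkg` are available; not executed as part of this
             # module):
             #
             #   fakedb = PackageVirtualDbapi(settings)
             #   fakedb.cpv_inject(pkg)    # replaces any existing package in pkg's slot
             #   matches = fakedb.match_pkgs(portage.dep.Atom("=" + pkg.cpv))
             #   if other_pkg in fakedb:
             #           fakedb.cpv_remove(other_pkg)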
4378         def __init__(self, settings):
4379                 portage.dbapi.__init__(self)
4380                 self.settings = settings
4381                 self._match_cache = {}
4382                 self._cp_map = {}
4383                 self._cpv_map = {}
4384
4385         def clear(self):
4386                 """
4387                 Remove all packages.
4388                 """
4389                 if self._cpv_map:
4390                         self._clear_cache()
4391                         self._cp_map.clear()
4392                         self._cpv_map.clear()
4393
4394         def copy(self):
4395                 obj = PackageVirtualDbapi(self.settings)
4396                 obj._match_cache = self._match_cache.copy()
4397                 obj._cp_map = self._cp_map.copy()
4398                 for k, v in obj._cp_map.iteritems():
4399                         obj._cp_map[k] = v[:]
4400                 obj._cpv_map = self._cpv_map.copy()
4401                 return obj
4402
4403         def __iter__(self):
4404                 return self._cpv_map.itervalues()
4405
4406         def __contains__(self, item):
4407                 existing = self._cpv_map.get(item.cpv)
4408                 if existing is not None and \
4409                         existing == item:
4410                         return True
4411                 return False
4412
4413         def get(self, item, default=None):
4414                 cpv = getattr(item, "cpv", None)
4415                 if cpv is None:
4416                         if len(item) != 4:
4417                                 return default
4418                         type_name, root, cpv, operation = item
4419
4420                 existing = self._cpv_map.get(cpv)
4421                 if existing is not None and \
4422                         existing == item:
4423                         return existing
4424                 return default
4425
4426         def match_pkgs(self, atom):
4427                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4428
4429         def _clear_cache(self):
4430                 if self._categories is not None:
4431                         self._categories = None
4432                 if self._match_cache:
4433                         self._match_cache = {}
4434
4435         def match(self, origdep, use_cache=1):
4436                 result = self._match_cache.get(origdep)
4437                 if result is not None:
4438                         return result[:]
4439                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4440                 self._match_cache[origdep] = result
4441                 return result[:]
4442
4443         def cpv_exists(self, cpv):
4444                 return cpv in self._cpv_map
4445
4446         def cp_list(self, mycp, use_cache=1):
4447                 cachelist = self._match_cache.get(mycp)
4448                 # cp_list() doesn't expand old-style virtuals
4449                 if cachelist and cachelist[0].startswith(mycp):
4450                         return cachelist[:]
4451                 cpv_list = self._cp_map.get(mycp)
4452                 if cpv_list is None:
4453                         cpv_list = []
4454                 else:
4455                         cpv_list = [pkg.cpv for pkg in cpv_list]
4456                 self._cpv_sort_ascending(cpv_list)
4457                 if not (not cpv_list and mycp.startswith("virtual/")):
4458                         self._match_cache[mycp] = cpv_list
4459                 return cpv_list[:]
4460
4461         def cp_all(self):
4462                 return list(self._cp_map)
4463
4464         def cpv_all(self):
4465                 return list(self._cpv_map)
4466
4467         def cpv_inject(self, pkg):
4468                 cp_list = self._cp_map.get(pkg.cp)
4469                 if cp_list is None:
4470                         cp_list = []
4471                         self._cp_map[pkg.cp] = cp_list
4472                 e_pkg = self._cpv_map.get(pkg.cpv)
4473                 if e_pkg is not None:
4474                         if e_pkg == pkg:
4475                                 return
4476                         self.cpv_remove(e_pkg)
4477                 for e_pkg in cp_list:
4478                         if e_pkg.slot_atom == pkg.slot_atom:
4479                                 if e_pkg == pkg:
4480                                         return
4481                                 self.cpv_remove(e_pkg)
4482                                 break
4483                 cp_list.append(pkg)
4484                 self._cpv_map[pkg.cpv] = pkg
4485                 self._clear_cache()
4486
4487         def cpv_remove(self, pkg):
4488                 old_pkg = self._cpv_map.get(pkg.cpv)
4489                 if old_pkg != pkg:
4490                         raise KeyError(pkg)
4491                 self._cp_map[pkg.cp].remove(pkg)
4492                 del self._cpv_map[pkg.cpv]
4493                 self._clear_cache()
4494
4495         def aux_get(self, cpv, wants):
4496                 metadata = self._cpv_map[cpv].metadata
4497                 return [metadata.get(x, "") for x in wants]
4498
4499         def aux_update(self, cpv, values):
4500                 self._cpv_map[cpv].metadata.update(values)
4501                 self._clear_cache()
4502
4503 class depgraph(object):
4504
4505         pkg_tree_map = RootConfig.pkg_tree_map
4506
4507         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4508
4509         def __init__(self, settings, trees, myopts, myparams, spinner):
4510                 self.settings = settings
4511                 self.target_root = settings["ROOT"]
4512                 self.myopts = myopts
4513                 self.myparams = myparams
4514                 self.edebug = 0
4515                 if settings.get("PORTAGE_DEBUG", "") == "1":
4516                         self.edebug = 1
4517                 self.spinner = spinner
4518                 self._running_root = trees["/"]["root_config"]
4519                 self._opts_no_restart = Scheduler._opts_no_restart
4520                 self.pkgsettings = {}
4521                 # Maps slot atom to package for each Package added to the graph.
4522                 self._slot_pkg_map = {}
4523                 # Maps nodes to the reasons they were selected for reinstallation.
4524                 self._reinstall_nodes = {}
4525                 self.mydbapi = {}
4526                 self.trees = {}
4527                 self._trees_orig = trees
4528                 self.roots = {}
4529                 # Contains a filtered view of preferred packages that are selected
4530                 # from available repositories.
4531                 self._filtered_trees = {}
4532                 # Contains installed packages and new packages that have been added
4533                 # to the graph.
4534                 self._graph_trees = {}
4535                 # All Package instances
4536                 self._pkg_cache = {}
4537                 for myroot in trees:
4538                         self.trees[myroot] = {}
4539                         # Create a RootConfig instance that references
4540                         # the FakeVartree instead of the real one.
4541                         self.roots[myroot] = RootConfig(
4542                                 trees[myroot]["vartree"].settings,
4543                                 self.trees[myroot],
4544                                 trees[myroot]["root_config"].setconfig)
4545                         for tree in ("porttree", "bintree"):
4546                                 self.trees[myroot][tree] = trees[myroot][tree]
4547                         self.trees[myroot]["vartree"] = \
4548                                 FakeVartree(trees[myroot]["root_config"],
4549                                         pkg_cache=self._pkg_cache)
4550                         self.pkgsettings[myroot] = portage.config(
4551                                 clone=self.trees[myroot]["vartree"].settings)
4552                         self._slot_pkg_map[myroot] = {}
4553                         vardb = self.trees[myroot]["vartree"].dbapi
4554                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4555                                 "--buildpkgonly" not in self.myopts
4556                         # This fakedbapi instance will model the state that the vdb will
4557                         # have after new packages have been installed.
4558                         fakedb = PackageVirtualDbapi(vardb.settings)
4559                         if preload_installed_pkgs:
4560                                 for pkg in vardb:
4561                                         self.spinner.update()
4562                                         # This triggers metadata updates via FakeVartree.
4563                                         vardb.aux_get(pkg.cpv, [])
4564                                         fakedb.cpv_inject(pkg)
4565
4566                         # Now that the vardb state is cached in our FakeVartree,
4567                         # we won't be needing the real vartree cache for a while.
4568                         # To make some room on the heap, clear the vardbapi
4569                         # caches.
4570                         trees[myroot]["vartree"].dbapi._clear_cache()
4571                         gc.collect()
4572
4573                         self.mydbapi[myroot] = fakedb
4574                         def graph_tree():
4575                                 pass
4576                         graph_tree.dbapi = fakedb
4577                         self._graph_trees[myroot] = {}
4578                         self._filtered_trees[myroot] = {}
4579                         # Substitute the graph tree for the vartree in dep_check() since we
4580                         # want atom selections to be consistent with package selections
4581                         # that have already been made.
4582                         self._graph_trees[myroot]["porttree"]   = graph_tree
4583                         self._graph_trees[myroot]["vartree"]    = graph_tree
4584                         def filtered_tree():
4585                                 pass
4586                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4587                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4588
4589                         # Passing in graph_tree as the vartree here could lead to better
4590                         # atom selections in some cases by causing atoms for packages that
4591                         # have been added to the graph to be preferred over other choices.
4592                         # However, it can trigger atom selections that result in
4593                         # unresolvable direct circular dependencies. For example, this
4594                         # happens with gwydion-dylan which depends on either itself or
4595                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4596                         # gwydion-dylan-bin needs to be selected in order to avoid
4597                         # an unresolvable direct circular dependency.
4598                         #
4599                         # To solve the problem described above, pass in "graph_db" so that
4600                         # packages that have been added to the graph are distinguishable
4601                         # from other available packages and installed packages. Also, pass
4602                         # the parent package into self._select_atoms() calls so that
4603                         # unresolvable direct circular dependencies can be detected and
4604                         # avoided when possible.
4605                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4606                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4607
4608                         dbs = []
4609                         portdb = self.trees[myroot]["porttree"].dbapi
4610                         bindb  = self.trees[myroot]["bintree"].dbapi
4611                         vardb  = self.trees[myroot]["vartree"].dbapi
4612                         #               (db, pkg_type, built, installed, db_keys)
4613                         if "--usepkgonly" not in self.myopts:
4614                                 db_keys = list(portdb._aux_cache_keys)
4615                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4616                         if "--usepkg" in self.myopts:
4617                                 db_keys = list(bindb._aux_cache_keys)
4618                                 dbs.append((bindb,  "binary", True, False, db_keys))
4619                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4620                         dbs.append((vardb, "installed", True, True, db_keys))
4621                         self._filtered_trees[myroot]["dbs"] = dbs
4622                         if "--usepkg" in self.myopts:
4623                                 self.trees[myroot]["bintree"].populate(
4624                                         "--getbinpkg" in self.myopts,
4625                                         "--getbinpkgonly" in self.myopts)
4626                 del trees
4627
4628                 self.digraph=portage.digraph()
4629                 # contains all sets added to the graph
4630                 self._sets = {}
4631                 # contains atoms given as arguments
4632                 self._sets["args"] = InternalPackageSet()
4633                 # contains all atoms from all sets added to the graph, including
4634                 # atoms given as arguments
4635                 self._set_atoms = InternalPackageSet()
4636                 self._atom_arg_map = {}
4637                 # contains all nodes pulled in by self._set_atoms
4638                 self._set_nodes = set()
4639                 # Contains only Blocker -> Uninstall edges
4640                 self._blocker_uninstalls = digraph()
4641                 # Contains only Package -> Blocker edges
4642                 self._blocker_parents = digraph()
4643                 # Contains only irrelevant Package -> Blocker edges
4644                 self._irrelevant_blockers = digraph()
4645                 # Contains only unsolvable Package -> Blocker edges
4646                 self._unsolvable_blockers = digraph()
4647                 # Contains all Blocker -> Blocked Package edges
4648                 self._blocked_pkgs = digraph()
4649                 # Contains world packages that have been protected from
4650                 # uninstallation but may not have been added to the graph
4651                 # if the graph is not complete yet.
4652                 self._blocked_world_pkgs = {}
4653                 self._slot_collision_info = {}
4654                 # Slot collision nodes are not allowed to block other packages since
4655                 # blocker validation is only able to account for one package per slot.
4656                 self._slot_collision_nodes = set()
4657                 self._parent_atoms = {}
4658                 self._slot_conflict_parent_atoms = set()
4659                 self._serialized_tasks_cache = None
4660                 self._scheduler_graph = None
4661                 self._displayed_list = None
4662                 self._pprovided_args = []
4663                 self._missing_args = []
4664                 self._masked_installed = set()
4665                 self._unsatisfied_deps_for_display = []
4666                 self._unsatisfied_blockers_for_display = None
4667                 self._circular_deps_for_display = None
4668                 self._dep_stack = []
4669                 self._unsatisfied_deps = []
4670                 self._initially_unsatisfied_deps = []
4671                 self._ignored_deps = []
4672                 self._required_set_names = set(["system", "world"])
4673                 self._select_atoms = self._select_atoms_highest_available
4674                 self._select_package = self._select_pkg_highest_available
4675                 self._highest_pkg_cache = {}
4676
4677         def _show_slot_collision_notice(self):
4678                 """Show an informational message advising the user to mask one of
4679                 the packages. In some cases it may be possible to resolve this
4680                 automatically, but support for backtracking (removal of nodes that have
4681                 already been selected) will be required in order to handle all possible
4682                 cases.
4683                 """
4684
4685                 if not self._slot_collision_info:
4686                         return
4687
4688                 self._show_merge_list()
4689
4690                 msg = []
4691                 msg.append("\n!!! Multiple package instances within a single " + \
4692                         "package slot have been pulled\n")
4693                 msg.append("!!! into the dependency graph, resulting" + \
4694                         " in a slot conflict:\n\n")
4695                 indent = "  "
4696                 # Max number of parents shown, to avoid flooding the display.
4697                 max_parents = 3
4698                 explanation_columns = 70
4699                 explanations = 0
4700                 for (slot_atom, root), slot_nodes \
4701                         in self._slot_collision_info.iteritems():
4702                         msg.append(str(slot_atom))
4703                         msg.append("\n\n")
4704
4705                         for node in slot_nodes:
4706                                 msg.append(indent)
4707                                 msg.append(str(node))
4708                                 parent_atoms = self._parent_atoms.get(node)
4709                                 if parent_atoms:
4710                                         pruned_list = set()
4711                                         # Prefer conflict atoms over others.
4712                                         for parent_atom in parent_atoms:
4713                                                 if len(pruned_list) >= max_parents:
4714                                                         break
4715                                                 if parent_atom in self._slot_conflict_parent_atoms:
4716                                                         pruned_list.add(parent_atom)
4717
4718                                         # If this package was pulled in by conflict atoms then
4719                                         # show those alone since those are the most interesting.
4720                                         if not pruned_list:
4721                                                 # When generating the pruned list, prefer instances
4722                                                 # of DependencyArg over instances of Package.
4723                                                 for parent_atom in parent_atoms:
4724                                                         if len(pruned_list) >= max_parents:
4725                                                                 break
4726                                                         parent, atom = parent_atom
4727                                                         if isinstance(parent, DependencyArg):
4728                                                                 pruned_list.add(parent_atom)
4729                                                 # Prefer Package instances that themselves have been
4730                                                 # pulled into collision slots.
4731                                                 for parent_atom in parent_atoms:
4732                                                         if len(pruned_list) >= max_parents:
4733                                                                 break
4734                                                         parent, atom = parent_atom
4735                                                         if isinstance(parent, Package) and \
4736                                                                 (parent.slot_atom, parent.root) \
4737                                                                 in self._slot_collision_info:
4738                                                                 pruned_list.add(parent_atom)
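                                                     # Finally, fall back to any remaining parent atoms
                                                     # until max_parents entries have been collected.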
4739                                                 for parent_atom in parent_atoms:
4740                                                         if len(pruned_list) >= max_parents:
4741                                                                 break
4742                                                         pruned_list.add(parent_atom)
4743                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4744                                         parent_atoms = pruned_list
4745                                         msg.append(" pulled in by\n")
4746                                         for parent_atom in parent_atoms:
4747                                                 parent, atom = parent_atom
4748                                                 msg.append(2*indent)
4749                                                 if isinstance(parent,
4750                                                         (PackageArg, AtomArg)):
4751                                                         # For PackageArg and AtomArg types, it's
4752                                                         # redundant to display the atom attribute.
4753                                                         msg.append(str(parent))
4754                                                 else:
4755                                                         # Display the specific atom from SetArg or
4756                                                         # Package types.
4757                                                         msg.append("%s required by %s" % (atom, parent))
4758                                                 msg.append("\n")
4759                                         if omitted_parents:
4760                                                 msg.append(2*indent)
4761                                                 msg.append("(and %d more)\n" % omitted_parents)
4762                                 else:
4763                                         msg.append(" (no parents)\n")
4764                                 msg.append("\n")
4765                         explanation = self._slot_conflict_explanation(slot_nodes)
4766                         if explanation:
4767                                 explanations += 1
4768                                 msg.append(indent + "Explanation:\n\n")
4769                                 for line in textwrap.wrap(explanation, explanation_columns):
4770                                         msg.append(2*indent + line + "\n")
4771                                 msg.append("\n")
4772                 msg.append("\n")
4773                 sys.stderr.write("".join(msg))
4774                 sys.stderr.flush()
4775
4776                 explanations_for_all = explanations == len(self._slot_collision_info)
4777
4778                 if explanations_for_all or "--quiet" in self.myopts:
4779                         return
4780
4781                 msg = []
4782                 msg.append("It may be possible to solve this problem ")
4783                 msg.append("by using package.mask to prevent one of ")
4784                 msg.append("those packages from being selected. ")
4785                 msg.append("However, it is also possible that conflicting ")
4786                 msg.append("dependencies exist such that they are impossible to ")
4787                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4788                 msg.append("the dependencies of two different packages, then those ")
4789                 msg.append("packages cannot be installed simultaneously.")
4790
4791                 from formatter import AbstractFormatter, DumbWriter
4792                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4793                 for x in msg:
4794                         f.add_flowing_data(x)
4795                 f.end_paragraph(1)
4796
4797                 msg = []
4798                 msg.append("For more information, see the MASKED PACKAGES ")
4799                 msg.append("section in the emerge man page or refer ")
4800                 msg.append("to the Gentoo Handbook.")
4801                 for x in msg:
4802                         f.add_flowing_data(x)
4803                 f.end_paragraph(1)
4804                 f.writer.flush()
4805
4806         def _slot_conflict_explanation(self, slot_nodes):
4807                 """
4808                 When a slot conflict occurs due to USE deps, there are a few
4809                 different cases to consider:
4810
4811                 1) New USE are correctly set but --newuse wasn't requested so an
4812                    installed package with incorrect USE happened to get pulled
4813                    into graph before the new one.
4814                    into the graph before the new one.
4815                 2) New USE are incorrectly set but an installed package has correct
4816                    USE so it got pulled into the graph, and a new instance also got
4817                    pulled in due to --newuse or an upgrade.
4818
4819                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4820                    and multiple package instances got pulled into the same slot to
4821                    satisfy the conflicting deps.
4822
4823                 Currently, explanations and suggested courses of action are generated
4824                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4825                 """
4826
4827                 if len(slot_nodes) != 2:
4828                         # Suggestions are only implemented for
4829                         # conflicts between two packages.
4830                         return None
4831
4832                 all_conflict_atoms = self._slot_conflict_parent_atoms
4833                 matched_node = None
4834                 matched_atoms = None
4835                 unmatched_node = None
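                     # Classify the two conflicting nodes: the node whose parent
                     # atoms include conflict atoms becomes matched_node and the
                     # other becomes unmatched_node. Suggestions are only generated
                     # when every conflict atom carries a USE dependency.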
4836                 for node in slot_nodes:
4837                         parent_atoms = self._parent_atoms.get(node)
4838                         if not parent_atoms:
4839                                 # Normally, there are always parent atoms. If there are
4840                                 # none then something unexpected is happening and there's
4841                                 # currently no suggestion for this case.
4842                                 return None
4843                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4844                         for parent_atom in conflict_atoms:
4845                                 parent, atom = parent_atom
4846                                 if not atom.use:
4847                                         # Suggestions are currently only implemented for cases
4848                                         # in which all conflict atoms have USE deps.
4849                                         return None
4850                         if conflict_atoms:
4851                                 if matched_node is not None:
4852                                         # If conflict atoms match multiple nodes
4853                                         # then there's no suggestion.
4854                                         return None
4855                                 matched_node = node
4856                                 matched_atoms = conflict_atoms
4857                         else:
4858                                 if unmatched_node is not None:
4859                                         # Neither node is matched by conflict atoms, and
4860                                         # there is no suggestion for this case.
4861                                         return None
4862                                 unmatched_node = node
4863
4864                 if matched_node is None or unmatched_node is None:
4865                         # This shouldn't happen.
4866                         return None
4867
4868                 if unmatched_node.installed and not matched_node.installed and \
4869                         unmatched_node.cpv == matched_node.cpv:
4870                         # If the conflicting packages are the same version then
4871                         # --newuse should be all that's needed. If they are different
4872                         # versions then there's some other problem.
4873                         return "New USE are correctly set, but --newuse wasn't" + \
4874                                 " requested, so an installed package with incorrect USE " + \
4875                                 "happened to get pulled into the dependency graph. " + \
4876                                 "In order to solve " + \
4877                                 "this, either specify the --newuse option or explicitly " + \
4878                                 "reinstall '%s'." % matched_node.slot_atom
4879
4880                 if matched_node.installed and not unmatched_node.installed:
4881                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4882                         explanation = ("New USE for '%s' are incorrectly set. " + \
4883                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4884                                 (matched_node.slot_atom, atoms[0])
4885                         if len(atoms) > 1:
4886                                 for atom in atoms[1:-1]:
4887                                         explanation += ", '%s'" % (atom,)
4888                                 if len(atoms) > 2:
4889                                         explanation += ","
4890                                 explanation += " and '%s'" % (atoms[-1],)
4891                         explanation += "."
4892                         return explanation
4893
4894                 return None
4895
4896         def _process_slot_conflicts(self):
4897                 """
4898                 Process slot conflict data to identify specific atoms which
4899                 lead to conflict. These atoms only match a subset of the
4900                 packages that have been pulled into a given slot.
4901                 """
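                     # Every parent atom collected from any node in the slot is
                     # tested against each node below: an atom that matches a node
                     # is attributed to it as a parent atom, while one that fails
                     # to match is recorded in self._slot_conflict_parent_atoms
                     # since it only matches a subset of the conflicting packages.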
4902                 for (slot_atom, root), slot_nodes \
4903                         in self._slot_collision_info.iteritems():
4904
4905                         all_parent_atoms = set()
4906                         for pkg in slot_nodes:
4907                                 parent_atoms = self._parent_atoms.get(pkg)
4908                                 if not parent_atoms:
4909                                         continue
4910                                 all_parent_atoms.update(parent_atoms)
4911
4912                         for pkg in slot_nodes:
4913                                 parent_atoms = self._parent_atoms.get(pkg)
4914                                 if parent_atoms is None:
4915                                         parent_atoms = set()
4916                                         self._parent_atoms[pkg] = parent_atoms
4917                                 for parent_atom in all_parent_atoms:
4918                                         if parent_atom in parent_atoms:
4919                                                 continue
4920                                         # Use package set for matching since it will match via
4921                                         # PROVIDE when necessary, while match_from_list does not.
4922                                         parent, atom = parent_atom
4923                                         atom_set = InternalPackageSet(
4924                                                 initial_atoms=(atom,))
4925                                         if atom_set.findAtomForPackage(pkg):
4926                                                 parent_atoms.add(parent_atom)
4927                                         else:
4928                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4929
4930         def _reinstall_for_flags(self, forced_flags,
4931                 orig_use, orig_iuse, cur_use, cur_iuse):
4932                 """Return a set of flags that trigger reinstallation, or None if there
4933                 are no such flags."""
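                     # Hypothetical example: with forced_flags=set(),
                     # orig_iuse={"ssl", "gtk"}, orig_use={"ssl"},
                     # cur_iuse={"ssl", "gtk"} and cur_use={"ssl", "gtk"},
                     # the returned set is {"gtk"} under either --newuse or
                     # --reinstall=changed-use, since the enabled state of "gtk"
                     # differs between the installed and new instances.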
4934                 if "--newuse" in self.myopts:
4935                         flags = set(orig_iuse.symmetric_difference(
4936                                 cur_iuse).difference(forced_flags))
4937                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4938                                 cur_iuse.intersection(cur_use)))
4939                         if flags:
4940                                 return flags
4941                 elif "changed-use" == self.myopts.get("--reinstall"):
4942                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4943                                 cur_iuse.intersection(cur_use))
4944                         if flags:
4945                                 return flags
4946                 return None
4947
4948         def _create_graph(self, allow_unsatisfied=False):
4949                 dep_stack = self._dep_stack
4950                 while dep_stack:
4951                         self.spinner.update()
4952                         dep = dep_stack.pop()
4953                         if isinstance(dep, Package):
4954                                 if not self._add_pkg_deps(dep,
4955                                         allow_unsatisfied=allow_unsatisfied):
4956                                         return 0
4957                                 continue
4958                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4959                                 return 0
4960                 return 1
4961
4962         def _add_dep(self, dep, allow_unsatisfied=False):
4963                 debug = "--debug" in self.myopts
4964                 buildpkgonly = "--buildpkgonly" in self.myopts
4965                 nodeps = "--nodeps" in self.myopts
4966                 empty = "empty" in self.myparams
4967                 deep = "deep" in self.myparams
4968                 update = "--update" in self.myopts and dep.depth <= 1
4969                 if dep.blocker:
4970                         if not buildpkgonly and \
4971                                 not nodeps and \
4972                                 dep.parent not in self._slot_collision_nodes:
4973                                 if dep.parent.onlydeps:
4974                                         # It's safe to ignore blockers if the
4975                                         # parent is an --onlydeps node.
4976                                         return 1
4977                                 # The blocker applies to the root where
4978                                 # the parent is or will be installed.
4979                                 blocker = Blocker(atom=dep.atom,
4980                                         eapi=dep.parent.metadata["EAPI"],
4981                                         root=dep.parent.root)
4982                                 self._blocker_parents.add(blocker, dep.parent)
4983                         return 1
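                     # Select the best available package that satisfies this atom,
                     # along with any node already in the graph for the same slot
                     # (see self._select_pkg_highest_available).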
4984                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4985                         onlydeps=dep.onlydeps)
4986                 if not dep_pkg:
4987                         if dep.priority.optional:
4988                                 # This could be an unnecessary build-time dep
4989                                 # pulled in by --with-bdeps=y.
4990                                 return 1
4991                         if allow_unsatisfied:
4992                                 self._unsatisfied_deps.append(dep)
4993                                 return 1
4994                         self._unsatisfied_deps_for_display.append(
4995                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4996                         return 0
4997                 # In some cases, dep_check will return deps that shouldn't
4998                 # be processed any further, so they are identified and
4999                 # discarded here. Try to discard as few as possible since
5000                 # discarded dependencies reduce the amount of information
5001                 # available for optimization of merge order.
5002                 if dep.priority.satisfied and \
5003                         not dep_pkg.installed and \
5004                         not (existing_node or empty or deep or update):
5005                         myarg = None
5006                         if dep.root == self.target_root:
5007                                 try:
5008                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5009                                 except StopIteration:
5010                                         pass
5011                                 except portage.exception.InvalidDependString:
5012                                         if not dep_pkg.installed:
5013                                                 # This shouldn't happen since the package
5014                                                 # should have been masked.
5015                                                 raise
5016                         if not myarg:
5017                                 self._ignored_deps.append(dep)
5018                                 return 1
5019
5020                 if not self._add_pkg(dep_pkg, dep):
5021                         return 0
5022                 return 1
5023
5024         def _add_pkg(self, pkg, dep):
5025                 myparent = None
5026                 priority = None
5027                 depth = 0
5028                 if dep is None:
5029                         dep = Dependency()
5030                 else:
5031                         myparent = dep.parent
5032                         priority = dep.priority
5033                         depth = dep.depth
5034                 if priority is None:
5035                         priority = DepPriority()
5036                 """
5037                 Fills the digraph with nodes comprised of packages to merge.
5038                 mybigkey is the package spec of the package to merge.
5039                 myparent is the package depending on mybigkey (or None)
5040                 addme = Should we add this package to the digraph or are we just looking at its deps?
5041                         Think --onlydeps, we need to ignore packages in that case.
5042                 #stuff to add:
5043                 #SLOT-aware emerge
5044                 #IUSE-aware emerge -> USE DEP aware depgraph
5045                 #"no downgrade" emerge
5046                 """
5047                 # Ensure that the dependencies of the same package
5048                 # are never processed more than once.
5049                 previously_added = pkg in self.digraph
5050
5051                 # select the correct /var database that we'll be checking against
5052                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5053                 pkgsettings = self.pkgsettings[pkg.root]
5054
5055                 arg_atoms = None
5056                 if True:
5057                         try:
5058                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5059                         except portage.exception.InvalidDependString, e:
5060                                 if not pkg.installed:
5061                                         show_invalid_depstring_notice(
5062                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5063                                         return 0
5064                                 del e
5065
5066                 if not pkg.onlydeps:
5067                         if not pkg.installed and \
5068                                 "empty" not in self.myparams and \
5069                                 vardbapi.match(pkg.slot_atom):
5070                                 # Increase the priority of dependencies on packages that
5071                                 # are being rebuilt. This optimizes merge order so that
5072                                 # dependencies are rebuilt/updated as soon as possible,
5073                                 # which is needed especially when emerge is called by
5074                                 # revdep-rebuild since dependencies may be affected by ABI
5075                                 # breakage that has rendered them useless. Don't adjust
5076                                 # priority here when in "empty" mode since all packages
5077                                 # are being merged in that case.
5078                                 priority.rebuild = True
5079
5080                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5081                         slot_collision = False
5082                         if existing_node:
5083                                 existing_node_matches = pkg.cpv == existing_node.cpv
5084                                 if existing_node_matches and \
5085                                         pkg != existing_node and \
5086                                         dep.atom is not None:
5087                                         # Use package set for matching since it will match via
5088                                         # PROVIDE when necessary, while match_from_list does not.
5089                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5090                                         if not atom_set.findAtomForPackage(existing_node):
5091                                                 existing_node_matches = False
5092                                 if existing_node_matches:
5093                                         # The existing node can be reused.
5094                                         if arg_atoms:
5095                                                 for parent_atom in arg_atoms:
5096                                                         parent, atom = parent_atom
5097                                                         self.digraph.add(existing_node, parent,
5098                                                                 priority=priority)
5099                                                         self._add_parent_atom(existing_node, parent_atom)
5100                                         # If a direct circular dependency is not an unsatisfied
5101                                         # buildtime dependency then drop it here since otherwise
5102                                         # it can skew the merge order calculation in an unwanted
5103                                         # way.
5104                                         if existing_node != myparent or \
5105                                                 (priority.buildtime and not priority.satisfied):
5106                                                 self.digraph.addnode(existing_node, myparent,
5107                                                         priority=priority)
5108                                                 if dep.atom is not None and dep.parent is not None:
5109                                                         self._add_parent_atom(existing_node,
5110                                                                 (dep.parent, dep.atom))
5111                                         return 1
5112                                 else:
5113
5114                                         # A slot collision has occurred.  Sometimes this coincides
5115                                         # with unresolvable blockers, so the slot collision will be
5116                                         # shown later if there are no unresolvable blockers.
5117                                         self._add_slot_conflict(pkg)
5118                                         slot_collision = True
5119
5120                         if slot_collision:
5121                                 # Now add this node to the graph so that self.display()
5122                                 # can show use flags and --tree output.  This node is
5123                                 # only being partially added to the graph.  It must not be
5124                                 # allowed to interfere with the other nodes that have been
5125                                 # added.  Do not overwrite data for existing nodes in
5126                                 # self.mydbapi since that data will be used for blocker
5127                                 # validation.
5128                                 # Even though the graph is now invalid, continue to process
5129                                 # dependencies so that things like --fetchonly can still
5130                                 # function despite collisions.
5131                                 pass
5132                         elif not previously_added:
5133                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5134                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5135                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5136
5137                         if not pkg.installed:
5138                                 # Allow this package to satisfy old-style virtuals in case it
5139                                 # doesn't already. Any pre-existing providers will be preferred
5140                                 # over this one.
5141                                 try:
5142                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5143                                         # For consistency, also update the global virtuals.
5144                                         settings = self.roots[pkg.root].settings
5145                                         settings.unlock()
5146                                         settings.setinst(pkg.cpv, pkg.metadata)
5147                                         settings.lock()
5148                                 except portage.exception.InvalidDependString, e:
5149                                         show_invalid_depstring_notice(
5150                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5151                                         del e
5152                                         return 0
5153
5154                 if arg_atoms:
5155                         self._set_nodes.add(pkg)
5156
5157                 # Do this even when addme is False (--onlydeps) so that the
5158                 # parent/child relationship is always known in case
5159                 # self._show_slot_collision_notice() needs to be called later.
5160                 self.digraph.add(pkg, myparent, priority=priority)
5161                 if dep.atom is not None and dep.parent is not None:
5162                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5163
5164                 if arg_atoms:
5165                         for parent_atom in arg_atoms:
5166                                 parent, atom = parent_atom
5167                                 self.digraph.add(pkg, parent, priority=priority)
5168                                 self._add_parent_atom(pkg, parent_atom)
5169
5170                 """ This section determines whether we go deeper into dependencies or not.
5171                     We want to go deeper in a few cases:
5172                     Installing package A, we need to make sure package A's deps are met.
5173                     emerge --deep <pkgspec>; we need to recursively check the dependencies of pkgspec.
5174                     If we are in --nodeps (no recursion) mode, we obviously only check one level of dependencies.
5175                 """
5176                 dep_stack = self._dep_stack
5177                 if "recurse" not in self.myparams:
5178                         return 1
5179                 elif pkg.installed and \
5180                         "deep" not in self.myparams:
5181                         dep_stack = self._ignored_deps
5182
5183                 self.spinner.update()
5184
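                     # Argument (command-line) packages restart depth accounting at
                     # zero, so dependency depth is measured from the arguments
                     # themselves.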
5185                 if arg_atoms:
5186                         depth = 0
5187                 pkg.depth = depth
5188                 if not previously_added:
5189                         dep_stack.append(pkg)
5190                 return 1
5191
5192         def _add_parent_atom(self, pkg, parent_atom):
5193                 parent_atoms = self._parent_atoms.get(pkg)
5194                 if parent_atoms is None:
5195                         parent_atoms = set()
5196                         self._parent_atoms[pkg] = parent_atoms
5197                 parent_atoms.add(parent_atom)
5198
5199         def _add_slot_conflict(self, pkg):
5200                 self._slot_collision_nodes.add(pkg)
5201                 slot_key = (pkg.slot_atom, pkg.root)
5202                 slot_nodes = self._slot_collision_info.get(slot_key)
5203                 if slot_nodes is None:
5204                         slot_nodes = set()
5205                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5206                         self._slot_collision_info[slot_key] = slot_nodes
5207                 slot_nodes.add(pkg)
5208
5209         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5210
5211                 mytype = pkg.type_name
5212                 myroot = pkg.root
5213                 mykey = pkg.cpv
5214                 metadata = pkg.metadata
5215                 myuse = pkg.use.enabled
5216                 jbigkey = pkg
5217                 depth = pkg.depth + 1
5218                 removal_action = "remove" in self.myparams
5219
5220                 edepend={}
5221                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5222                 for k in depkeys:
5223                         edepend[k] = metadata[k]
5224
5225                 if not pkg.built and \
5226                         "--buildpkgonly" in self.myopts and \
5227                         "deep" not in self.myparams and \
5228                         "empty" not in self.myparams:
5229                         edepend["RDEPEND"] = ""
5230                         edepend["PDEPEND"] = ""
5231                 bdeps_optional = False
5232
5233                 if pkg.built and not removal_action:
5234                         if self.myopts.get("--with-bdeps", "n") == "y":
5235                                 # Pull in build time deps as requested, but mark them as
5236                                 # "optional" since they are not strictly required. This allows
5237                                 # more freedom in the merge order calculation for solving
5238                                 # circular dependencies. Don't convert to PDEPEND since that
5239                                 # could make --with-bdeps=y less effective if it is used to
5240                                 # adjust merge order to prevent built_with_use() calls from
5241                                 # failing.
5242                                 bdeps_optional = True
5243                         else:
5244                                 # Built packages do not have build-time dependencies.
5245                                 edepend["DEPEND"] = ""
5246
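                     # For removal actions, build-time deps are kept in the graph
                     # by default and are only dropped when --with-bdeps=n is
                     # explicitly requested.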
5247                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5248                         edepend["DEPEND"] = ""
5249
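                     # DEPEND is resolved against the build root ("/"), while
                     # RDEPEND and PDEPEND are resolved against the root that the
                     # package is being installed into.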
5250                 deps = (
5251                         ("/", edepend["DEPEND"],
5252                                 self._priority(buildtime=(not bdeps_optional),
5253                                 optional=bdeps_optional)),
5254                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5255                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5256                 )
5257
5258                 debug = "--debug" in self.myopts
5259                 strict = mytype != "installed"
5260                 try:
5261                         for dep_root, dep_string, dep_priority in deps:
5262                                 if not dep_string:
5263                                         continue
5264                                 if debug:
5265                                         print
5266                                         print "Parent:   ", jbigkey
5267                                         print "Depstring:", dep_string
5268                                         print "Priority:", dep_priority
5269                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5270                                 try:
5271                                         selected_atoms = self._select_atoms(dep_root,
5272                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5273                                                 priority=dep_priority)
5274                                 except portage.exception.InvalidDependString, e:
5275                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5276                                         return 0
5277                                 if debug:
5278                                         print "Candidates:", selected_atoms
5279
5280                                 for atom in selected_atoms:
5281                                         try:
5282
5283                                                 atom = portage.dep.Atom(atom)
5284
5285                                                 mypriority = dep_priority.copy()
5286                                                 if not atom.blocker and vardb.match(atom):
5287                                                         mypriority.satisfied = True
5288
5289                                                 if not self._add_dep(Dependency(atom=atom,
5290                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5291                                                         priority=mypriority, root=dep_root),
5292                                                         allow_unsatisfied=allow_unsatisfied):
5293                                                         return 0
5294
5295                                         except portage.exception.InvalidAtom, e:
5296                                                 show_invalid_depstring_notice(
5297                                                         pkg, dep_string, str(e))
5298                                                 del e
5299                                                 if not pkg.installed:
5300                                                         return 0
5301
5302                                 if debug:
5303                                         print "Exiting...", jbigkey
5304                 except portage.exception.AmbiguousPackageName, e:
5305                         pkgs = e.args[0]
5306                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5307                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5308                         for cpv in pkgs:
5309                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5310                         portage.writemsg("\n", noiselevel=-1)
5311                         if mytype == "binary":
5312                                 portage.writemsg(
5313                                         "!!! This binary package cannot be installed: '%s'\n" % \
5314                                         mykey, noiselevel=-1)
5315                         elif mytype == "ebuild":
5316                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5317                                 myebuild, mylocation = portdb.findname2(mykey)
5318                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5319                                         "'%s'\n" % myebuild, noiselevel=-1)
5320                         portage.writemsg("!!! Please notify the package maintainer " + \
5321                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5322                         return 0
5323                 return 1
5324
5325         def _priority(self, **kwargs):
5326                 if "remove" in self.myparams:
5327                         priority_constructor = UnmergeDepPriority
5328                 else:
5329                         priority_constructor = DepPriority
5330                 return priority_constructor(**kwargs)
5331
5332         def _dep_expand(self, root_config, atom_without_category):
5333                 """
5334                 @param root_config: a root config instance
5335                 @type root_config: RootConfig
5336                 @param atom_without_category: an atom without a category component
5337                 @type atom_without_category: String
5338                 @rtype: list
5339                 @returns: a list of atoms containing categories (possibly empty)
5340                 """
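                     # Hypothetical example: "pyyaml" would expand to
                     # ["dev-python/pyyaml"] if that category/package pair exists
                     # in one of the configured package databases.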
5341                 null_cp = portage.dep_getkey(insert_category_into_atom(
5342                         atom_without_category, "null"))
5343                 cat, atom_pn = portage.catsplit(null_cp)
5344
5345                 dbs = self._filtered_trees[root_config.root]["dbs"]
5346                 categories = set()
5347                 for db, pkg_type, built, installed, db_keys in dbs:
5348                         for cat in db.categories:
5349                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5350                                         categories.add(cat)
5351
5352                 deps = []
5353                 for cat in categories:
5354                         deps.append(insert_category_into_atom(
5355                                 atom_without_category, cat))
5356                 return deps
5357
5358         def _have_new_virt(self, root, atom_cp):
5359                 ret = False
5360                 for db, pkg_type, built, installed, db_keys in \
5361                         self._filtered_trees[root]["dbs"]:
5362                         if db.cp_list(atom_cp):
5363                                 ret = True
5364                                 break
5365                 return ret
5366
5367         def _iter_atoms_for_pkg(self, pkg):
5368                 # TODO: add multiple $ROOT support
5369                 if pkg.root != self.target_root:
5370                         return
5371                 atom_arg_map = self._atom_arg_map
5372                 root_config = self.roots[pkg.root]
5373                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5374                         atom_cp = portage.dep_getkey(atom)
5375                         if atom_cp != pkg.cp and \
5376                                 self._have_new_virt(pkg.root, atom_cp):
5377                                 continue
5378                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5379                         visible_pkgs.reverse() # descending order
5380                         higher_slot = None
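                             # If a visible, higher-version match for this atom exists
                             # in a different slot, skip the atom here since that slot
                             # is preferred for satisfying the argument.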
5381                         for visible_pkg in visible_pkgs:
5382                                 if visible_pkg.cp != atom_cp:
5383                                         continue
5384                                 if pkg >= visible_pkg:
5385                                         # This is descending order, and we're not
5386                                         # interested in any versions <= pkg given.
5387                                         break
5388                                 if pkg.slot_atom != visible_pkg.slot_atom:
5389                                         higher_slot = visible_pkg
5390                                         break
5391                         if higher_slot is not None:
5392                                 continue
5393                         for arg in atom_arg_map[(atom, pkg.root)]:
5394                                 if isinstance(arg, PackageArg) and \
5395                                         arg.package != pkg:
5396                                         continue
5397                                 yield arg, atom
5398
5399         def select_files(self, myfiles):
5400                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5401                 appropriate depgraph and return a favorite list."""
5402                 debug = "--debug" in self.myopts
5403                 root_config = self.roots[self.target_root]
5404                 sets = root_config.sets
5405                 getSetAtoms = root_config.setconfig.getSetAtoms
5406                 myfavorites=[]
5407                 myroot = self.target_root
5408                 dbs = self._filtered_trees[myroot]["dbs"]
5409                 vardb = self.trees[myroot]["vartree"].dbapi
5410                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5411                 portdb = self.trees[myroot]["porttree"].dbapi
5412                 bindb = self.trees[myroot]["bintree"].dbapi
5413                 pkgsettings = self.pkgsettings[myroot]
5414                 args = []
5415                 onlydeps = "--onlydeps" in self.myopts
5416                 lookup_owners = []
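                     # Each argument is classified below: binary packages (.tbz2),
                     # ebuild paths, absolute filesystem paths (resolved to the
                     # packages that own them), package sets, and plain atoms.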
5417                 for x in myfiles:
5418                         ext = os.path.splitext(x)[1]
5419                         if ext==".tbz2":
5420                                 if not os.path.exists(x):
5421                                         if os.path.exists(
5422                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5423                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5424                                         elif os.path.exists(
5425                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5426                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5427                                         else:
5428                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5429                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5430                                                 return 0, myfavorites
5431                                 mytbz2=portage.xpak.tbz2(x)
5432                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5433                                 if os.path.realpath(x) != \
5434                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5435                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5436                                         return 0, myfavorites
5437                                 db_keys = list(bindb._aux_cache_keys)
5438                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5439                                 pkg = Package(type_name="binary", root_config=root_config,
5440                                         cpv=mykey, built=True, metadata=metadata,
5441                                         onlydeps=onlydeps)
5442                                 self._pkg_cache[pkg] = pkg
5443                                 args.append(PackageArg(arg=x, package=pkg,
5444                                         root_config=root_config))
5445                         elif ext==".ebuild":
5446                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5447                                 pkgdir = os.path.dirname(ebuild_path)
5448                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5449                                 cp = pkgdir[len(tree_root)+1:]
5450                                 e = portage.exception.PackageNotFound(
5451                                         ("%s is not in a valid portage tree " + \
5452                                         "hierarchy or does not exist") % x)
5453                                 if not portage.isvalidatom(cp):
5454                                         raise e
5455                                 cat = portage.catsplit(cp)[0]
5456                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5457                                 if not portage.isvalidatom("="+mykey):
5458                                         raise e
5459                                 ebuild_path = portdb.findname(mykey)
5460                                 if ebuild_path:
5461                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5462                                                 cp, os.path.basename(ebuild_path)):
5463                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5464                                                 return 0, myfavorites
5465                                         if mykey not in portdb.xmatch(
5466                                                 "match-visible", portage.dep_getkey(mykey)):
5467                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5468                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5469                                                 print colorize("BAD", "*** page for details.")
5470                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5471                                                         "Continuing...")
5472                                 else:
5473                                         raise portage.exception.PackageNotFound(
5474                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5475                                 db_keys = list(portdb._aux_cache_keys)
5476                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5477                                 pkg = Package(type_name="ebuild", root_config=root_config,
5478                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5479                                 pkgsettings.setcpv(pkg)
5480                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5481                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5482                                 self._pkg_cache[pkg] = pkg
5483                                 args.append(PackageArg(arg=x, package=pkg,
5484                                         root_config=root_config))
5485                         elif x.startswith(os.path.sep):
5486                                 if not x.startswith(myroot):
5487                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5488                                                 " $ROOT.\n") % x, noiselevel=-1)
5489                                         return 0, []
5490                                 # Queue these up since it's most efficient to handle
5491                                 # multiple files in a single iter_owners() call.
5492                                 lookup_owners.append(x)
5493                         else:
5494                                 if x in ("system", "world"):
5495                                         x = SETPREFIX + x
5496                                 if x.startswith(SETPREFIX):
5497                                         s = x[len(SETPREFIX):]
5498                                         if s not in sets:
5499                                                 raise portage.exception.PackageSetNotFound(s)
5500                                         if s in self._sets:
5501                                                 continue
5502                                         # Recursively expand sets so that containment tests in
5503                                         # self._get_parent_sets() properly match atoms in nested
5504                                         # sets (like if world contains system).
5505                                         expanded_set = InternalPackageSet(
5506                                                 initial_atoms=getSetAtoms(s))
5507                                         self._sets[s] = expanded_set
5508                                         args.append(SetArg(arg=x, set=expanded_set,
5509                                                 root_config=root_config))
5510                                         continue
5511                                 if not is_valid_package_atom(x):
5512                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5513                                                 noiselevel=-1)
5514                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5515                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5516                                         return (0,[])
5517                                 # Don't expand categories or old-style virtuals here unless
5518                                 # necessary. Expansion of old-style virtuals here causes at
5519                                 # least the following problems:
5520                                 #   1) It's more difficult to determine which set(s) an atom
5521                                 #      came from, if any.
5522                                 #   2) It takes away freedom from the resolver to choose other
5523                                 #      possible expansions when necessary.
5524                                 if "/" in x:
5525                                         args.append(AtomArg(arg=x, atom=x,
5526                                                 root_config=root_config))
5527                                         continue
5528                                 expanded_atoms = self._dep_expand(root_config, x)
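                                     # If the name expands to multiple categories but
                                     # exactly one of them has an installed package,
                                     # prefer that category.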
5529                                 installed_cp_set = set()
5530                                 for atom in expanded_atoms:
5531                                         atom_cp = portage.dep_getkey(atom)
5532                                         if vardb.cp_list(atom_cp):
5533                                                 installed_cp_set.add(atom_cp)
5534                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5535                                         installed_cp = iter(installed_cp_set).next()
5536                                         expanded_atoms = [atom for atom in expanded_atoms \
5537                                                 if portage.dep_getkey(atom) == installed_cp]
5538
5539                                 if len(expanded_atoms) > 1:
5540                                         print
5541                                         print
5542                                         ambiguous_package_name(x, expanded_atoms, root_config,
5543                                                 self.spinner, self.myopts)
5544                                         return False, myfavorites
5545                                 if expanded_atoms:
5546                                         atom = expanded_atoms[0]
5547                                 else:
5548                                         null_atom = insert_category_into_atom(x, "null")
5549                                         null_cp = portage.dep_getkey(null_atom)
5550                                         cat, atom_pn = portage.catsplit(null_cp)
5551                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5552                                         if virts_p:
5553                                                 # Allow the depgraph to choose which virtual.
5554                                                 atom = insert_category_into_atom(x, "virtual")
5555                                         else:
5556                                                 atom = insert_category_into_atom(x, "null")
5557
5558                                 args.append(AtomArg(arg=x, atom=atom,
5559                                         root_config=root_config))
5560
5561                 if lookup_owners:
5562                         relative_paths = []
5563                         search_for_multiple = False
5564                         if len(lookup_owners) > 1:
5565                                 search_for_multiple = True
5566
5567                         for x in lookup_owners:
5568                                 if not search_for_multiple and os.path.isdir(x):
5569                                         search_for_multiple = True
5570                                 relative_paths.append(x[len(myroot):])
5571
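                             # Resolve the queued paths to the installed packages that
                             # own them; when only a single regular file was given, the
                             # first matching owner is sufficient.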
5572                         owners = set()
5573                         for pkg, relative_path in \
5574                                 real_vardb._owners.iter_owners(relative_paths):
5575                                 owners.add(pkg.mycpv)
5576                                 if not search_for_multiple:
5577                                         break
5578
5579                         if not owners:
5580                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5581                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5582                                 return 0, []
5583
5584                         for cpv in owners:
5585                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5586                                 if not slot:
5587                                         # portage now masks packages with missing slot, but it's
5588                                         # possible that one was installed by an older version
5589                                         atom = portage.cpv_getkey(cpv)
5590                                 else:
5591                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5592                                 args.append(AtomArg(arg=atom, atom=atom,
5593                                         root_config=root_config))
5594
5595                 if "--update" in self.myopts:
5596                         # In some cases, the greedy slots behavior can pull in a slot that
5597                         # the user would want to uninstall due to it being blocked by a
5598                         # newer version in a different slot. Therefore, it's necessary to
5599                         # detect and discard any that should be uninstalled. Each time
5600                         # that arguments are updated, package selections are repeated in
5601                         # order to ensure consistency with the current arguments:
5602                         #
5603                         #  1) Initialize args
5604                         #  2) Select packages and generate initial greedy atoms
5605                         #  3) Update args with greedy atoms
5606                         #  4) Select packages and generate greedy atoms again, while
5607                         #     accounting for any blockers between selected packages
5608                         #  5) Update args with revised greedy atoms
5609
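                             # Illustrative (hypothetical) example: if an argument atom like
                             # "sys-devel/gcc" selects a package in slot 4.3 while slot 4.1
                             # is also installed, a greedy atom such as "sys-devel/gcc:4.1"
                             # may be added here unless the blocker lookahead discards it.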
5610                         self._set_args(args)
5611                         greedy_args = []
5612                         for arg in args:
5613                                 greedy_args.append(arg)
5614                                 if not isinstance(arg, AtomArg):
5615                                         continue
5616                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5617                                         greedy_args.append(
5618                                                 AtomArg(arg=arg.arg, atom=atom,
5619                                                         root_config=arg.root_config))
5620
5621                         self._set_args(greedy_args)
5622                         del greedy_args
5623
5624                         # Revise greedy atoms, accounting for any blockers
5625                         # between selected packages.
5626                         revised_greedy_args = []
5627                         for arg in args:
5628                                 revised_greedy_args.append(arg)
5629                                 if not isinstance(arg, AtomArg):
5630                                         continue
5631                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5632                                         blocker_lookahead=True):
5633                                         revised_greedy_args.append(
5634                                                 AtomArg(arg=arg.arg, atom=atom,
5635                                                         root_config=arg.root_config))
5636                         args = revised_greedy_args
5637                         del revised_greedy_args
5638
5639                 self._set_args(args)
5640
5641                 myfavorites = set(myfavorites)
5642                 for arg in args:
5643                         if isinstance(arg, (AtomArg, PackageArg)):
5644                                 myfavorites.add(arg.atom)
5645                         elif isinstance(arg, SetArg):
5646                                 myfavorites.add(arg.arg)
5647                 myfavorites = list(myfavorites)
5648
5649                 pprovideddict = pkgsettings.pprovideddict
5650                 if debug:
5651                         portage.writemsg("\n", noiselevel=-1)
5652                 # Order needs to be preserved since a feature of --nodeps
5653                 # is to allow the user to force a specific merge order.
5654                 args.reverse()
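                     # Pop arguments in their original order and resolve each of their
                     # atoms to a single best-matching package, which is added to the
                     # graph as a root package.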
5655                 while args:
5656                         arg = args.pop()
5657                         for atom in arg.set:
5658                                 self.spinner.update()
5659                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5660                                         root=myroot, parent=arg)
5661                                 atom_cp = portage.dep_getkey(atom)
5662                                 try:
5663                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5664                                         if pprovided and portage.match_from_list(atom, pprovided):
5665                                                 # A provided package has been specified on the command line.
5666                                                 self._pprovided_args.append((arg, atom))
5667                                                 continue
5668                                         if isinstance(arg, PackageArg):
5669                                                 if not self._add_pkg(arg.package, dep) or \
5670                                                         not self._create_graph():
5671                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5672                                                                 "dependencies for %s\n") % arg.arg)
5673                                                         return 0, myfavorites
5674                                                 continue
5675                                         if debug:
5676                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5677                                                         (arg, atom), noiselevel=-1)
5678                                         pkg, existing_node = self._select_package(
5679                                                 myroot, atom, onlydeps=onlydeps)
5680                                         if not pkg:
5681                                                 if not (isinstance(arg, SetArg) and \
5682                                                         arg.name in ("system", "world")):
5683                                                         self._unsatisfied_deps_for_display.append(
5684                                                                 ((myroot, atom), {}))
5685                                                         return 0, myfavorites
5686                                                 self._missing_args.append((arg, atom))
5687                                                 continue
5688                                         if atom_cp != pkg.cp:
5689                                                 # For old-style virtuals, we need to repeat the
5690                                                 # package.provided check against the selected package.
5691                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5692                                                 pprovided = pprovideddict.get(pkg.cp)
5693                                                 if pprovided and \
5694                                                         portage.match_from_list(expanded_atom, pprovided):
5695                                                         # A provided package has been
5696                                                         # specified on the command line.
5697                                                         self._pprovided_args.append((arg, atom))
5698                                                         continue
5699                                         if pkg.installed and "selective" not in self.myparams:
5700                                                 self._unsatisfied_deps_for_display.append(
5701                                                         ((myroot, atom), {}))
5702                                                 # Previous behavior was to bail out in this case, but
5703                                                 # since the dep is satisfied by the installed package,
5704                                                 # it's more friendly to continue building the graph
5705                                                 # and just show a warning message. Therefore, only bail
5706                                                 # out here if the atom is not from either the system or
5707                                                 # world set.
5708                                                 if not (isinstance(arg, SetArg) and \
5709                                                         arg.name in ("system", "world")):
5710                                                         return 0, myfavorites
5711
5712                                         # Add the selected package to the graph as soon as possible
5713                                         # so that later dep_check() calls can use it as feedback
5714                                         # for making more consistent atom selections.
5715                                         if not self._add_pkg(pkg, dep):
5716                                                 if isinstance(arg, SetArg):
5717                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5718                                                                 "dependencies for %s from %s\n") % \
5719                                                                 (atom, arg.arg))
5720                                                 else:
5721                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5722                                                                 "dependencies for %s\n") % atom)
5723                                                 return 0, myfavorites
5724
5725                                 except portage.exception.MissingSignature, e:
5726                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5727                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5728                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5729                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5730                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5731                                         return 0, myfavorites
5732                                 except portage.exception.InvalidSignature, e:
5733                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5734                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5735                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5736                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5737                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5738                                         return 0, myfavorites
5739                                 except SystemExit, e:
5740                                         raise # Needed else can't exit
5741                                 except Exception, e:
5742                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5743                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5744                                         raise
5745
5746                 # Now that the root packages have been added to the graph,
5747                 # process the dependencies.
5748                 if not self._create_graph():
5749                         return 0, myfavorites
5750
5751                 missing=0
5752                 if "--usepkgonly" in self.myopts:
5753                         for xs in self.digraph.all_nodes():
5754                                 if not isinstance(xs, Package):
5755                                         continue
5756                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5757                                         if missing == 0:
5758                                                 print
5759                                         missing += 1
5760                                         print "Missing binary for:",xs[2]
5761
5762                 try:
5763                         self.altlist()
5764                 except self._unknown_internal_error:
5765                         return False, myfavorites
5766
5767                 # The first return value is true unless binary packages are missing.
5768                 return (not missing,myfavorites)
5769
5770         def _set_args(self, args):
5771                 """
5772                 Create the "args" package set from atoms and packages given as
5773                 arguments. This method can be called multiple times if necessary.
5774                 The package selection cache is automatically invalidated, since
5775                 arguments influence package selections.
5776                 """
5777                 args_set = self._sets["args"]
5778                 args_set.clear()
5779                 for arg in args:
5780                         if not isinstance(arg, (AtomArg, PackageArg)):
5781                                 continue
5782                         atom = arg.atom
5783                         if atom in args_set:
5784                                 continue
5785                         args_set.add(atom)
5786
5787                 self._set_atoms.clear()
5788                 self._set_atoms.update(chain(*self._sets.itervalues()))
5789                 atom_arg_map = self._atom_arg_map
5790                 atom_arg_map.clear()
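                     # Map (atom, root) keys to the argument objects that contain them,
                     # so that _iter_atoms_for_pkg() can trace a selected package back
                     # to the argument(s) that pulled it in.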
5791                 for arg in args:
5792                         for atom in arg.set:
5793                                 atom_key = (atom, arg.root_config.root)
5794                                 refs = atom_arg_map.get(atom_key)
5795                                 if refs is None:
5796                                         refs = []
5797                                         atom_arg_map[atom_key] = refs
5798                                 if arg not in refs:
5799                                         refs.append(arg)
5800
5801                 # Invalidate the package selection cache, since
5802                 # arguments influence package selections.
5803                 self._highest_pkg_cache.clear()
5804                 for trees in self._filtered_trees.itervalues():
5805                         trees["porttree"].dbapi._clear_cache()
5806
5807         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5808                 """
5809                 Return a list of slot atoms corresponding to installed slots that
5810                 differ from the slot of the highest visible match. When
5811                 blocker_lookahead is True, slot atoms that would trigger a blocker
5812                 conflict are automatically discarded, potentially allowing automatic
5813                 uninstallation of older slots when appropriate.
5814                 """
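                     # Hypothetical example: with dev-lang/python installed in slots 2.5
                     # and 2.6 and the highest visible match in slot 2.6, this returns
                     # [Atom("dev-lang/python:2.5")], provided a visible package exists
                     # for that slot and no blocker conflict discards it.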
5815                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5816                 if highest_pkg is None:
5817                         return []
5818                 vardb = root_config.trees["vartree"].dbapi
5819                 slots = set()
5820                 for cpv in vardb.match(atom):
5821                         # don't mix new virtuals with old virtuals
5822                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5823                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5824
5825                 slots.add(highest_pkg.metadata["SLOT"])
5826                 if len(slots) == 1:
5827                         return []
5828                 greedy_pkgs = []
5829                 slots.remove(highest_pkg.metadata["SLOT"])
5830                 while slots:
5831                         slot = slots.pop()
5832                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5833                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5834                         if pkg is not None and \
5835                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5836                                 greedy_pkgs.append(pkg)
5837                 if not greedy_pkgs:
5838                         return []
5839                 if not blocker_lookahead:
5840                         return [pkg.slot_atom for pkg in greedy_pkgs]
5841
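                     # Collect the blocker atoms declared by each greedy candidate and by
                     # the highest visible match, so that slots which would block each
                     # other can be filtered out below.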
5842                 blockers = {}
5843                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5844                 for pkg in greedy_pkgs + [highest_pkg]:
5845                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5846                         try:
5847                                 atoms = self._select_atoms(
5848                                         pkg.root, dep_str, pkg.use.enabled,
5849                                         parent=pkg, strict=True)
5850                         except portage.exception.InvalidDependString:
5851                                 continue
5852                         blocker_atoms = (x for x in atoms if x.blocker)
5853                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5854
5855                 if highest_pkg not in blockers:
5856                         return []
5857
5858                 # filter packages with invalid deps
5859                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5860
5861                 # filter packages that conflict with highest_pkg
5862                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5863                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5864                         blockers[pkg].findAtomForPackage(highest_pkg))]
5865
5866                 if not greedy_pkgs:
5867                         return []
5868
5869                 # If two packages conflict, discard the lower version.
5870                 discard_pkgs = set()
5871                 greedy_pkgs.sort(reverse=True)
5872                 for i in xrange(len(greedy_pkgs) - 1):
5873                         pkg1 = greedy_pkgs[i]
5874                         if pkg1 in discard_pkgs:
5875                                 continue
5876                         for j in xrange(i + 1, len(greedy_pkgs)):
5877                                 pkg2 = greedy_pkgs[j]
5878                                 if pkg2 in discard_pkgs:
5879                                         continue
5880                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5881                                         blockers[pkg2].findAtomForPackage(pkg1):
5882                                         # pkg1 > pkg2
5883                                         discard_pkgs.add(pkg2)
5884
5885                 return [pkg.slot_atom for pkg in greedy_pkgs \
5886                         if pkg not in discard_pkgs]
5887
5888         def _select_atoms_from_graph(self, *pargs, **kwargs):
5889                 """
5890                 Prefer atoms matching packages that have already been
5891                 added to the graph or those that are installed and have
5892                 not been scheduled for replacement.
5893                 """
5894                 kwargs["trees"] = self._graph_trees
5895                 return self._select_atoms_highest_available(*pargs, **kwargs)
5896
5897         def _select_atoms_highest_available(self, root, depstring,
5898                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5899                 """This will raise InvalidDependString if necessary. If trees is
5900                 None then self._filtered_trees is used."""
5901                 pkgsettings = self.pkgsettings[root]
5902                 if trees is None:
5903                         trees = self._filtered_trees
5904                 if not getattr(priority, "buildtime", False):
5905                         # The parent should only be passed to dep_check() for buildtime
5906                         # dependencies since that's the only case when it's appropriate
5907                         # to trigger the circular dependency avoidance code which uses it.
5908                         # It's important not to trigger the same circular dependency
5909                         # avoidance code for runtime dependencies since it's not needed
5910                         # and it can promote an incorrect package choice.
5911                         parent = None
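                     # dep_check() evaluates USE conditionals and any-of groups in the
                     # dependency string and returns a flattened list of chosen atoms.
                     # For example (hypothetical), "ssl? ( dev-libs/openssl )" reduces
                     # to ["dev-libs/openssl"] when the ssl flag is enabled.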
5912                 if True:
5913                         try:
5914                                 if parent is not None:
5915                                         trees[root]["parent"] = parent
5916                                 if not strict:
5917                                         portage.dep._dep_check_strict = False
5918                                 mycheck = portage.dep_check(depstring, None,
5919                                         pkgsettings, myuse=myuse,
5920                                         myroot=root, trees=trees)
5921                         finally:
5922                                 if parent is not None:
5923                                         trees[root].pop("parent")
5924                                 portage.dep._dep_check_strict = True
5925                         if not mycheck[0]:
5926                                 raise portage.exception.InvalidDependString(mycheck[1])
5927                         selected_atoms = mycheck[1]
5928                 return selected_atoms
5929
5930         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5931                 atom = portage.dep.Atom(atom)
5932                 atom_set = InternalPackageSet(initial_atoms=(atom,))
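                     # Strip any USE dependency from the atom (remove_slot() drops both
                     # the slot and the USE part, so the slot is re-appended below) so
                     # that candidates can be matched regardless of their USE state.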
5933                 atom_without_use = atom
5934                 if atom.use:
5935                         atom_without_use = portage.dep.remove_slot(atom)
5936                         if atom.slot:
5937                                 atom_without_use += ":" + atom.slot
5938                         atom_without_use = portage.dep.Atom(atom_without_use)
5939                 xinfo = '"%s"' % atom
5940                 if arg:
5941                         xinfo='"%s"' % arg
5942                 # Discard null/ from failed cpv_expand category expansion.
5943                 xinfo = xinfo.replace("null/", "")
5944                 masked_packages = []
5945                 missing_use = []
5946                 masked_pkg_instances = set()
5947                 missing_licenses = []
5948                 have_eapi_mask = False
5949                 pkgsettings = self.pkgsettings[root]
5950                 implicit_iuse = pkgsettings._get_implicit_iuse()
5951                 root_config = self.roots[root]
5952                 portdb = self.roots[root].trees["porttree"].dbapi
5953                 dbs = self._filtered_trees[root]["dbs"]
5954                 for db, pkg_type, built, installed, db_keys in dbs:
5955                         if installed:
5956                                 continue
5957                         match = db.match
5958                         if hasattr(db, "xmatch"):
5959                                 cpv_list = db.xmatch("match-all", atom_without_use)
5960                         else:
5961                                 cpv_list = db.match(atom_without_use)
5962                         # descending order
5963                         cpv_list.reverse()
5964                         for cpv in cpv_list:
5965                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5966                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5967                                 if metadata is not None:
5968                                         pkg = Package(built=built, cpv=cpv,
5969                                                 installed=installed, metadata=metadata,
5970                                                 root_config=root_config)
5971                                         if pkg.cp != atom.cp:
5972                                                 # A cpv can be returned from dbapi.match() as an
5973                                                 # old-style virtual match even in cases when the
5974                                                 # package does not actually PROVIDE the virtual.
5975                                                 # Filter out any such false matches here.
5976                                                 if not atom_set.findAtomForPackage(pkg):
5977                                                         continue
5978                                         if mreasons:
5979                                                 masked_pkg_instances.add(pkg)
5980                                         if atom.use:
5981                                                 missing_use.append(pkg)
5982                                                 if not mreasons:
5983                                                         continue
5984                                 masked_packages.append(
5985                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5986
5987                 missing_use_reasons = []
5988                 missing_iuse_reasons = []
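                     # For each candidate that matched the atom without its USE deps,
                     # determine whether the required flags are missing from IUSE
                     # entirely or merely need to be enabled/disabled.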
5989                 for pkg in missing_use:
5990                         use = pkg.use.enabled
5991                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5992                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5993                         missing_iuse = []
5994                         for x in atom.use.required:
5995                                 if iuse_re.match(x) is None:
5996                                         missing_iuse.append(x)
5997                         mreasons = []
5998                         if missing_iuse:
5999                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6000                                 missing_iuse_reasons.append((pkg, mreasons))
6001                         else:
6002                                 need_enable = sorted(atom.use.enabled.difference(use))
6003                                 need_disable = sorted(atom.use.disabled.intersection(use))
6004                                 if need_enable or need_disable:
6005                                         changes = []
6006                                         changes.extend(colorize("red", "+" + x) \
6007                                                 for x in need_enable)
6008                                         changes.extend(colorize("blue", "-" + x) \
6009                                                 for x in need_disable)
6010                                         mreasons.append("Change USE: %s" % " ".join(changes))
6011                                         missing_use_reasons.append((pkg, mreasons))
6012
6013                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6014                         in missing_use_reasons if pkg not in masked_pkg_instances]
6015
6016                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6017                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6018
6019                 show_missing_use = False
6020                 if unmasked_use_reasons:
6021                         # Only show the latest version.
6022                         show_missing_use = unmasked_use_reasons[:1]
6023                 elif unmasked_iuse_reasons:
6024                         if missing_use_reasons:
6025                                 # All packages with required IUSE are masked,
6026                                 # so display a normal masking message.
6027                                 pass
6028                         else:
6029                                 show_missing_use = unmasked_iuse_reasons
6030
6031                 if show_missing_use:
6032                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6033                         print "!!! One of the following packages is required to complete your request:"
6034                         for pkg, mreasons in show_missing_use:
6035                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6036
6037                 elif masked_packages:
6038                         print "\n!!! " + \
6039                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6040                                 colorize("INFORM", xinfo) + \
6041                                 colorize("BAD", " have been masked.")
6042                         print "!!! One of the following masked packages is required to complete your request:"
6043                         have_eapi_mask = show_masked_packages(masked_packages)
6044                         if have_eapi_mask:
6045                                 print
6046                                 msg = ("The current version of portage supports " + \
6047                                         "EAPI '%s'. You must upgrade to a newer version" + \
6048                                         " of portage before EAPI masked packages can" + \
6049                                         " be installed.") % portage.const.EAPI
6050                                 from textwrap import wrap
6051                                 for line in wrap(msg, 75):
6052                                         print line
6053                         print
6054                         show_mask_docs()
6055                 else:
6056                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6057
6058                 # Show parent nodes and the argument that pulled them in.
6059                 traversed_nodes = set()
6060                 node = myparent
6061                 msg = []
6062                 while node is not None:
6063                         traversed_nodes.add(node)
6064                         msg.append('(dependency required by "%s" [%s])' % \
6065                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6066                         # When traversing to parents, prefer arguments over packages
6067                         # since arguments are root nodes. Never traverse the same
6068                         # package twice, in order to prevent an infinite loop.
6069                         selected_parent = None
6070                         for parent in self.digraph.parent_nodes(node):
6071                                 if isinstance(parent, DependencyArg):
6072                                         msg.append('(dependency required by "%s" [argument])' % \
6073                                                 (colorize('INFORM', str(parent))))
6074                                         selected_parent = None
6075                                         break
6076                                 if parent not in traversed_nodes:
6077                                         selected_parent = parent
6078                         node = selected_parent
6079                 for line in msg:
6080                         print line
6081
6082                 print
6083
6084         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6085                 cache_key = (root, atom, onlydeps)
6086                 ret = self._highest_pkg_cache.get(cache_key)
6087                 if ret is not None:
6088                         pkg, existing = ret
6089                         if pkg and not existing:
6090                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6091                                 if existing and existing == pkg:
6092                                         # Update the cache to reflect that the
6093                                         # package has been added to the graph.
6094                                         ret = pkg, pkg
6095                                         self._highest_pkg_cache[cache_key] = ret
6096                         return ret
6097                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6098                 self._highest_pkg_cache[cache_key] = ret
6099                 pkg, existing = ret
6100                 if pkg is not None:
6101                         settings = pkg.root_config.settings
6102                         if visible(settings, pkg) and not (pkg.installed and \
6103                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6104                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6105                 return ret
6106
6107         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6108                 root_config = self.roots[root]
6109                 pkgsettings = self.pkgsettings[root]
6110                 dbs = self._filtered_trees[root]["dbs"]
6111                 vardb = self.roots[root].trees["vartree"].dbapi
6112                 portdb = self.roots[root].trees["porttree"].dbapi
6113                 # List of acceptable packages, ordered by type preference.
6114                 matched_packages = []
6115                 highest_version = None
6116                 if not isinstance(atom, portage.dep.Atom):
6117                         atom = portage.dep.Atom(atom)
6118                 atom_cp = atom.cp
6119                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6120                 existing_node = None
6121                 myeb = None
6122                 usepkgonly = "--usepkgonly" in self.myopts
6123                 empty = "empty" in self.myparams
6124                 selective = "selective" in self.myparams
6125                 reinstall = False
6126                 noreplace = "--noreplace" in self.myopts
6127                 # Behavior of the "selective" parameter depends on
6128                 # whether or not a package matches an argument atom.
6129                 # If an installed package provides an old-style
6130                 # virtual that is no longer provided by an available
6131                 # package, the installed package may match an argument
6132                 # atom even though none of the available packages do.
6133                 # Therefore, "selective" logic does not consider
6134                 # whether or not an installed package matches an
6135                 # argument atom. It only considers whether or not
6136                 # available packages match argument atoms, which is
6137                 # represented by the found_available_arg flag.
6138                 found_available_arg = False
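                     # Two passes over the databases: the first prefers a package that
                     # already has a graph node for the matching slot, and the second
                     # performs normal selection if no such node exists.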
6139                 for find_existing_node in True, False:
6140                         if existing_node:
6141                                 break
6142                         for db, pkg_type, built, installed, db_keys in dbs:
6143                                 if existing_node:
6144                                         break
6145                                 if installed and not find_existing_node:
6146                                         want_reinstall = reinstall or empty or \
6147                                                 (found_available_arg and not selective)
6148                                         if want_reinstall and matched_packages:
6149                                                 continue
6150                                 if hasattr(db, "xmatch"):
6151                                         cpv_list = db.xmatch("match-all", atom)
6152                                 else:
6153                                         cpv_list = db.match(atom)
6154
6155                                 # USE=multislot can make an installed package appear as if
6156                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6157                                 # won't do any good as long as USE=multislot is enabled since
6158                                 # the newly built package still won't have the expected slot.
6159                                 # Therefore, assume that such SLOT dependencies are already
6160                                 # satisfied rather than forcing a rebuild.
6161                                 if installed and not cpv_list and atom.slot:
6162                                         for cpv in db.match(atom.cp):
6163                                                 slot_available = False
6164                                                 for other_db, other_type, other_built, \
6165                                                         other_installed, other_keys in dbs:
6166                                                         try:
6167                                                                 if atom.slot == \
6168                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6169                                                                         slot_available = True
6170                                                                         break
6171                                                         except KeyError:
6172                                                                 pass
6173                                                 if not slot_available:
6174                                                         continue
6175                                                 inst_pkg = self._pkg(cpv, "installed",
6176                                                         root_config, installed=installed)
6177                                                 # Remove the slot from the atom and verify that
6178                                                 # the package matches the resulting atom.
6179                                                 atom_without_slot = portage.dep.remove_slot(atom)
6180                                                 if atom.use:
6181                                                         atom_without_slot += str(atom.use)
6182                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6183                                                 if portage.match_from_list(
6184                                                         atom_without_slot, [inst_pkg]):
6185                                                         cpv_list = [inst_pkg.cpv]
6186                                                 break
6187
6188                                 if not cpv_list:
6189                                         continue
6190                                 pkg_status = "merge"
6191                                 if installed or onlydeps:
6192                                         pkg_status = "nomerge"
6193                                 # descending order
6194                                 cpv_list.reverse()
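                                     # Walk candidates from highest to lowest version, skipping
                                     # those rejected by visibility, --noreplace, or USE
                                     # dependency checks, and stop at the first acceptable one.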
6195                                 for cpv in cpv_list:
6196                                         # Make --noreplace take precedence over --newuse.
6197                                         if not installed and noreplace and \
6198                                                 cpv in vardb.match(atom):
6199                                                 # If the installed version is masked, it may
6200                                                 # be necessary to look at lower versions,
6201                                                 # in case there is a visible downgrade.
6202                                                 continue
6203                                         reinstall_for_flags = None
6204                                         cache_key = (pkg_type, root, cpv, pkg_status)
6205                                         calculated_use = True
6206                                         pkg = self._pkg_cache.get(cache_key)
6207                                         if pkg is None:
6208                                                 calculated_use = False
6209                                                 try:
6210                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6211                                                 except KeyError:
6212                                                         continue
6213                                                 pkg = Package(built=built, cpv=cpv,
6214                                                         installed=installed, metadata=metadata,
6215                                                         onlydeps=onlydeps, root_config=root_config,
6216                                                         type_name=pkg_type)
6217                                                 metadata = pkg.metadata
6218                                                 if not built:
6219                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6220                                                 if not built and ("?" in metadata["LICENSE"] or \
6221                                                         "?" in metadata["PROVIDE"]):
6222                                                         # This is avoided whenever possible because
6223                                                         # it's expensive. It only needs to be done here
6224                                                         # if it has an effect on visibility.
6225                                                         pkgsettings.setcpv(pkg)
6226                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6227                                                         calculated_use = True
6228                                                 self._pkg_cache[pkg] = pkg
6229
6230                                         if not installed or (built and matched_packages):
6231                                                 # Only enforce visibility on installed packages
6232                                                 # if there is at least one other visible package
6233                                                 # available. By filtering installed masked packages
6234                                                 # here, packages that have been masked since they
6235                                                 # were installed can be automatically downgraded
6236                                                 # to an unmasked version.
6237                                                 try:
6238                                                         if not visible(pkgsettings, pkg):
6239                                                                 continue
6240                                                 except portage.exception.InvalidDependString:
6241                                                         if not installed:
6242                                                                 continue
6243
6244                                                 # Enable upgrade or downgrade to a version
6245                                                 # with visible KEYWORDS when the installed
6246                                                 # version is masked by KEYWORDS, but never
6247                                                 # reinstall the same exact version only due
6248                                                 # to a KEYWORDS mask.
6249                                                 if built and matched_packages:
6250
6251                                                         different_version = None
6252                                                         for avail_pkg in matched_packages:
6253                                                                 if not portage.dep.cpvequal(
6254                                                                         pkg.cpv, avail_pkg.cpv):
6255                                                                         different_version = avail_pkg
6256                                                                         break
6257                                                         if different_version is not None:
6258
6259                                                                 if installed and \
6260                                                                         pkgsettings._getMissingKeywords(
6261                                                                         pkg.cpv, pkg.metadata):
6262                                                                         continue
6263
6264                                                                 # If the ebuild no longer exists or its
6265                                                                 # keywords have been dropped, reject built
6266                                                                 # instances (installed or binary).
6267                                                                 # If --usepkgonly is enabled, assume that
6268                                                                 # the ebuild status should be ignored.
6269                                                                 if not usepkgonly:
6270                                                                         try:
6271                                                                                 pkg_eb = self._pkg(
6272                                                                                         pkg.cpv, "ebuild", root_config)
6273                                                                         except portage.exception.PackageNotFound:
6274                                                                                 continue
6275                                                                         else:
6276                                                                                 if not visible(pkgsettings, pkg_eb):
6277                                                                                         continue
6278
6279                                         if not pkg.built and not calculated_use:
6280                                                 # This is avoided whenever possible because
6281                                                 # it's expensive.
6282                                                 pkgsettings.setcpv(pkg)
6283                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6284
6285                                         if pkg.cp != atom.cp:
6286                                                 # A cpv can be returned from dbapi.match() as an
6287                                                 # old-style virtual match even in cases when the
6288                                                 # package does not actually PROVIDE the virtual.
6289                                                 # Filter out any such false matches here.
6290                                                 if not atom_set.findAtomForPackage(pkg):
6291                                                         continue
6292
6293                                         myarg = None
6294                                         if root == self.target_root:
6295                                                 try:
6296                                                         # Ebuild USE must have been calculated prior
6297                                                         # to this point, in case atoms have USE deps.
6298                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6299                                                 except StopIteration:
6300                                                         pass
6301                                                 except portage.exception.InvalidDependString:
6302                                                         if not installed:
6303                                                                 # masked by corruption
6304                                                                 continue
6305                                         if not installed and myarg:
6306                                                 found_available_arg = True
6307
6308                                         if atom.use and not pkg.built:
6309                                                 use = pkg.use.enabled
6310                                                 if atom.use.enabled.difference(use):
6311                                                         continue
6312                                                 if atom.use.disabled.intersection(use):
6313                                                         continue
6314                                         if pkg.cp == atom_cp:
6315                                                 if highest_version is None:
6316                                                         highest_version = pkg
6317                                                 elif pkg > highest_version:
6318                                                         highest_version = pkg
6319                                         # At this point, we've found the highest visible
6320                                         # match from the current repo. Any lower versions
6321                                         # from this repo are ignored, so this loop
6322                                         # will always end with a break statement below
6323                                         # this point.
6324                                         if find_existing_node:
6325                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6326                                                 if not e_pkg:
6327                                                         break
6328                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6329                                                         if highest_version and \
6330                                                                 e_pkg.cp == atom_cp and \
6331                                                                 e_pkg < highest_version and \
6332                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6333                                                                 # There is a higher version available in a
6334                                                                 # different slot, so this existing node is
6335                                                                 # irrelevant.
6336                                                                 pass
6337                                                         else:
6338                                                                 matched_packages.append(e_pkg)
6339                                                                 existing_node = e_pkg
6340                                                 break
6341                                         # Compare built package to current config and
6342                                         # reject the built package if necessary.
6343                                         if built and not installed and \
6344                                                 ("--newuse" in self.myopts or \
6345                                                 "--reinstall" in self.myopts):
6346                                                 iuses = pkg.iuse.all
6347                                                 old_use = pkg.use.enabled
6348                                                 if myeb:
6349                                                         pkgsettings.setcpv(myeb)
6350                                                 else:
6351                                                         pkgsettings.setcpv(pkg)
6352                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6353                                                 forced_flags = set()
6354                                                 forced_flags.update(pkgsettings.useforce)
6355                                                 forced_flags.update(pkgsettings.usemask)
6356                                                 cur_iuse = iuses
6357                                                 if myeb and not usepkgonly:
6358                                                         cur_iuse = myeb.iuse.all
6359                                                 if self._reinstall_for_flags(forced_flags,
6360                                                         old_use, iuses,
6361                                                         now_use, cur_iuse):
6362                                                         break
6363                                         # Compare current config to installed package
6364                                         # and do not reinstall if possible.
6365                                         if not installed and \
6366                                                 ("--newuse" in self.myopts or \
6367                                                 "--reinstall" in self.myopts) and \
6368                                                 cpv in vardb.match(atom):
6369                                                 pkgsettings.setcpv(pkg)
6370                                                 forced_flags = set()
6371                                                 forced_flags.update(pkgsettings.useforce)
6372                                                 forced_flags.update(pkgsettings.usemask)
6373                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6374                                                 old_iuse = set(filter_iuse_defaults(
6375                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6376                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6377                                                 cur_iuse = pkg.iuse.all
6378                                                 reinstall_for_flags = \
6379                                                         self._reinstall_for_flags(
6380                                                         forced_flags, old_use, old_iuse,
6381                                                         cur_use, cur_iuse)
6382                                                 if reinstall_for_flags:
6383                                                         reinstall = True
6384                                         if not built:
6385                                                 myeb = pkg
6386                                         matched_packages.append(pkg)
6387                                         if reinstall_for_flags:
6388                                                 self._reinstall_nodes[pkg] = \
6389                                                         reinstall_for_flags
6390                                         break
6391
6392                 if not matched_packages:
6393                         return None, None
6394
6395                 if "--debug" in self.myopts:
6396                         for pkg in matched_packages:
6397                                 portage.writemsg("%s %s\n" % \
6398                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6399
6400                 # Filter out any old-style virtual matches if they are
6401                 # mixed with new-style virtual matches.
6402                 cp = portage.dep_getkey(atom)
6403                 if len(matched_packages) > 1 and \
6404                         "virtual" == portage.catsplit(cp)[0]:
6405                         for pkg in matched_packages:
6406                                 if pkg.cp != cp:
6407                                         continue
6408                                 # Got a new-style virtual, so filter
6409                                 # out any old-style virtuals.
6410                                 matched_packages = [pkg for pkg in matched_packages \
6411                                         if pkg.cp == cp]
6412                                 break
6413
6414                 if len(matched_packages) > 1:
6415                         bestmatch = portage.best(
6416                                 [pkg.cpv for pkg in matched_packages])
6417                         matched_packages = [pkg for pkg in matched_packages \
6418                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6419
6420                 # ordered by type preference ("ebuild" type is the last resort)
6421                 return  matched_packages[-1], existing_node
6422
6423         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6424                 """
6425                 Select packages that have already been added to the graph or
6426                 those that are installed and have not been scheduled for
6427                 replacement.
6428                 """
6429                 graph_db = self._graph_trees[root]["porttree"].dbapi
6430                 matches = graph_db.match_pkgs(atom)
6431                 if not matches:
6432                         return None, None
6433                 pkg = matches[-1] # highest match
6434                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6435                 return pkg, in_graph
6436
6437         def _complete_graph(self):
6438                 """
6439                 Add any deep dependencies of required sets (args, system, world) that
6440                 have not been pulled into the graph yet. This ensures that the graph
6441                 is consistent such that initially satisfied deep dependencies are not
6442                 broken in the new graph. Initially unsatisfied dependencies are
6443                 irrelevant since we only want to avoid breaking dependencies that are
6444                 initially satisfied.
6445
6446                 Since this method can consume enough time to disturb users, it is
6447                 currently only enabled by the --complete-graph option.
6448                 """
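                     # Outline of the code below: package selection is temporarily
                     # restricted to nodes already in the graph or installed packages,
                     # the "deep" parameter is forced on, and the required sets of each
                     # root are re-traversed. Any initially satisfied dependency that a
                     # scheduled upgrade would break causes the corresponding installed
                     # package to be added back to the graph so that the conflict is
                     # reported (possibly as a solvable slot collision).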
6449                 if "--buildpkgonly" in self.myopts or \
6450                         "recurse" not in self.myparams:
6451                         return 1
6452
6453                 if "complete" not in self.myparams:
6454                         # Skip this to avoid consuming enough time to disturb users.
6455                         return 1
6456
6457                 # Put the depgraph into a mode that causes it to only
6458                 # select packages that have already been added to the
6459                 # graph or those that are installed and have not been
6460                 # scheduled for replacement. Also, toggle the "deep"
6461                 # parameter so that all dependencies are traversed and
6462                 # accounted for.
6463                 self._select_atoms = self._select_atoms_from_graph
6464                 self._select_package = self._select_pkg_from_graph
6465                 already_deep = "deep" in self.myparams
6466                 if not already_deep:
6467                         self.myparams.add("deep")
6468
6469                 for root in self.roots:
6470                         required_set_names = self._required_set_names.copy()
6471                         if root == self.target_root and \
6472                                 (already_deep or "empty" in self.myparams):
6473                                 required_set_names.difference_update(self._sets)
6474                         if not required_set_names and not self._ignored_deps:
6475                                 continue
6476                         root_config = self.roots[root]
6477                         setconfig = root_config.setconfig
6478                         args = []
6479                         # Reuse existing SetArg instances when available.
6480                         for arg in self.digraph.root_nodes():
6481                                 if not isinstance(arg, SetArg):
6482                                         continue
6483                                 if arg.root_config != root_config:
6484                                         continue
6485                                 if arg.name in required_set_names:
6486                                         args.append(arg)
6487                                         required_set_names.remove(arg.name)
6488                         # Create new SetArg instances only when necessary.
6489                         for s in required_set_names:
6490                                 expanded_set = InternalPackageSet(
6491                                         initial_atoms=setconfig.getSetAtoms(s))
6492                                 atom = SETPREFIX + s
6493                                 args.append(SetArg(arg=atom, set=expanded_set,
6494                                         root_config=root_config))
6495                         vardb = root_config.trees["vartree"].dbapi
6496                         for arg in args:
6497                                 for atom in arg.set:
6498                                         self._dep_stack.append(
6499                                                 Dependency(atom=atom, root=root, parent=arg))
6500                         if self._ignored_deps:
6501                                 self._dep_stack.extend(self._ignored_deps)
6502                                 self._ignored_deps = []
6503                         if not self._create_graph(allow_unsatisfied=True):
6504                                 return 0
6505                         # Check the unsatisfied deps to see if any initially satisfied deps
6506                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6507                         # deps are irrelevant since we only want to avoid breaking deps
6508                         # that are initially satisfied.
6509                         while self._unsatisfied_deps:
6510                                 dep = self._unsatisfied_deps.pop()
6511                                 matches = vardb.match_pkgs(dep.atom)
6512                                 if not matches:
6513                                         self._initially_unsatisfied_deps.append(dep)
6514                                         continue
6515                                 # A scheduled installation broke a deep dependency.
6516                                 # Add the installed package to the graph so that it
6517                                 # will be appropriately reported as a slot collision
6518                                 # (possibly solvable via backtracking).
6519                                 pkg = matches[-1] # highest match
6520                                 if not self._add_pkg(pkg, dep):
6521                                         return 0
6522                                 if not self._create_graph(allow_unsatisfied=True):
6523                                         return 0
6524                 return 1
6525
6526         def _pkg(self, cpv, type_name, root_config, installed=False):
6527                 """
6528                 Get a package instance from the cache, or create a new
6529                 one if necessary. Raises PackageNotFound if aux_get fails
6530                 for some reason (package does not exist or is
6531                 corrupt).
6532                 """
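                     # Cache entries are keyed on (type_name, root, cpv, operation),
                     # where operation is "nomerge" for installed packages and "merge"
                     # otherwise, for example ("installed", "/", "sys-apps/foo-1.0",
                     # "nomerge") -- the cpv here is purely illustrative.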
6533                 operation = "merge"
6534                 if installed:
6535                         operation = "nomerge"
6536                 pkg = self._pkg_cache.get(
6537                         (type_name, root_config.root, cpv, operation))
6538                 if pkg is None:
6539                         tree_type = self.pkg_tree_map[type_name]
6540                         db = root_config.trees[tree_type].dbapi
6541                         db_keys = list(self._trees_orig[root_config.root][
6542                                 tree_type].dbapi._aux_cache_keys)
6543                         try:
6544                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6545                         except KeyError:
6546                                 raise portage.exception.PackageNotFound(cpv)
6547                         pkg = Package(cpv=cpv, metadata=metadata,
6548                                 root_config=root_config, installed=installed)
6549                         if type_name == "ebuild":
6550                                 settings = self.pkgsettings[root_config.root]
6551                                 settings.setcpv(pkg)
6552                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6553                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6554                         self._pkg_cache[pkg] = pkg
6555                 return pkg
6556
6557         def validate_blockers(self):
6558                 """Remove any blockers from the digraph that do not match any of the
6559                 packages within the graph.  If necessary, create hard deps to ensure
6560                 correct merge order such that mutually blocking packages are never
6561                 installed simultaneously."""
6562
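                     # The code below runs in two passes: first, blocker atoms are
                     # collected for every installed package, with BlockerCache used to
                     # avoid repeating expensive dep_check calls; second, each blocker
                     # is matched against the initial (vartree) and final (mydbapi)
                     # package sets, and is either resolved by creating an uninstall
                     # task with a hard ordering dep, discarded as irrelevant, or
                     # recorded as unsolvable.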
6563                 if "--buildpkgonly" in self.myopts or \
6564                         "--nodeps" in self.myopts:
6565                         return True
6566
6567                 #if "deep" in self.myparams:
6568                 if True:
6569                         # Pull in blockers from all installed packages that haven't already
6570                         # been pulled into the depgraph.  This is not enabled by default
6571                         # due to the performance penalty that is incurred by all the
6572                         # additional dep_check calls that are required.
6573
6574                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6575                         for myroot in self.trees:
6576                                 vardb = self.trees[myroot]["vartree"].dbapi
6577                                 portdb = self.trees[myroot]["porttree"].dbapi
6578                                 pkgsettings = self.pkgsettings[myroot]
6579                                 final_db = self.mydbapi[myroot]
6580
6581                                 blocker_cache = BlockerCache(myroot, vardb)
6582                                 stale_cache = set(blocker_cache)
6583                                 for pkg in vardb:
6584                                         cpv = pkg.cpv
6585                                         stale_cache.discard(cpv)
6586                                         pkg_in_graph = self.digraph.contains(pkg)
6587
6588                                         # Check for masked installed packages. Only warn about
6589                                         # packages that are in the graph in order to avoid warning
6590                                         # about those that will be automatically uninstalled during
6591                                         # the merge process or by --depclean.
6592                                         if pkg in final_db:
6593                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6594                                                         self._masked_installed.add(pkg)
6595
6596                                         blocker_atoms = None
6597                                         blockers = None
6598                                         if pkg_in_graph:
6599                                                 blockers = []
6600                                                 try:
6601                                                         blockers.extend(
6602                                                                 self._blocker_parents.child_nodes(pkg))
6603                                                 except KeyError:
6604                                                         pass
6605                                                 try:
6606                                                         blockers.extend(
6607                                                                 self._irrelevant_blockers.child_nodes(pkg))
6608                                                 except KeyError:
6609                                                         pass
6610                                         if blockers is not None:
6611                                                 blockers = set(str(blocker.atom) \
6612                                                         for blocker in blockers)
6613
6614                                         # If this node has any blockers, create a "nomerge"
6615                                         # node for it so that they can be enforced.
6616                                         self.spinner.update()
6617                                         blocker_data = blocker_cache.get(cpv)
6618                                         if blocker_data is not None and \
6619                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6620                                                 blocker_data = None
6621
6622                                         # If blocker data from the graph is available, use
6623                                         # it to validate the cache and update the cache if
6624                                         # it seems invalid.
6625                                         if blocker_data is not None and \
6626                                                 blockers is not None:
6627                                                 if not blockers.symmetric_difference(
6628                                                         blocker_data.atoms):
6629                                                         continue
6630                                                 blocker_data = None
6631
6632                                         if blocker_data is None and \
6633                                                 blockers is not None:
6634                                                 # Re-use the blockers from the graph.
6635                                                 blocker_atoms = sorted(blockers)
6636                                                 counter = long(pkg.metadata["COUNTER"])
6637                                                 blocker_data = \
6638                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6639                                                 blocker_cache[pkg.cpv] = blocker_data
6640                                                 continue
6641
6642                                         if blocker_data:
6643                                                 blocker_atoms = blocker_data.atoms
6644                                         else:
6645                                                 # Use aux_get() to trigger FakeVartree global
6646                                                 # updates on *DEPEND when appropriate.
6647                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6648                                                 # It is crucial to pass in final_db here in order to
6649                                                 # optimize dep_check calls by eliminating atoms via
6650                                                 # dep_wordreduce and dep_eval calls.
6651                                                 try:
6652                                                         portage.dep._dep_check_strict = False
6653                                                         try:
6654                                                                 success, atoms = portage.dep_check(depstr,
6655                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6656                                                                         trees=self._graph_trees, myroot=myroot)
6657                                                         except Exception, e:
6658                                                                 if isinstance(e, SystemExit):
6659                                                                         raise
6660                                                                 # This is helpful, for example, if a ValueError
6661                                                                 # is thrown from cpv_expand due to multiple
6662                                                                 # matches (this can happen if an atom lacks a
6663                                                                 # category).
6664                                                                 show_invalid_depstring_notice(
6665                                                                         pkg, depstr, str(e))
6666                                                                 del e
6667                                                                 raise
6668                                                 finally:
6669                                                         portage.dep._dep_check_strict = True
6670                                                 if not success:
6671                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6672                                                         if replacement_pkg and \
6673                                                                 replacement_pkg[0].operation == "merge":
6674                                                                 # This package is being replaced anyway, so
6675                                                                 # ignore invalid dependencies so as not to
6676                                                                 # annoy the user too much (otherwise they'd be
6677                                                                 # forced to manually unmerge it first).
6678                                                                 continue
6679                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6680                                                         return False
6681                                                 blocker_atoms = [myatom for myatom in atoms \
6682                                                         if myatom.startswith("!")]
6683                                                 blocker_atoms.sort()
6684                                                 counter = long(pkg.metadata["COUNTER"])
6685                                                 blocker_cache[cpv] = \
6686                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6687                                         if blocker_atoms:
6688                                                 try:
6689                                                         for atom in blocker_atoms:
6690                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6691                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6692                                                                 self._blocker_parents.add(blocker, pkg)
6693                                                 except portage.exception.InvalidAtom, e:
6694                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6695                                                         show_invalid_depstring_notice(
6696                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6697                                                         return False
6698                                 for cpv in stale_cache:
6699                                         del blocker_cache[cpv]
6700                                 blocker_cache.flush()
6701                                 del blocker_cache
6702
6703                 # Discard any "uninstall" tasks scheduled by previous calls
6704                 # to this method, since those tasks may not make sense given
6705                 # the current graph state.
6706                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6707                 if previous_uninstall_tasks:
6708                         self._blocker_uninstalls = digraph()
6709                         self.digraph.difference_update(previous_uninstall_tasks)
6710
6711                 for blocker in self._blocker_parents.leaf_nodes():
6712                         self.spinner.update()
6713                         root_config = self.roots[blocker.root]
6714                         virtuals = root_config.settings.getvirtuals()
6715                         myroot = blocker.root
6716                         initial_db = self.trees[myroot]["vartree"].dbapi
6717                         final_db = self.mydbapi[myroot]
6718
6719                         provider_virtual = False
6720                         if blocker.cp in virtuals and \
6721                                 not self._have_new_virt(blocker.root, blocker.cp):
6722                                 provider_virtual = True
6723
6724                         if provider_virtual:
6725                                 atoms = []
6726                                 for provider_entry in virtuals[blocker.cp]:
6727                                         provider_cp = \
6728                                                 portage.dep_getkey(provider_entry)
6729                                         atoms.append(blocker.atom.replace(
6730                                                 blocker.cp, provider_cp))
6731                         else:
6732                                 atoms = [blocker.atom]
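                             # atoms now holds either the literal blocker atom or, for an
                             # old-style (PROVIDE-based) virtual, one atom per provider so
                             # that the block is matched against the actual providers.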
6733
6734                         blocked_initial = []
6735                         for atom in atoms:
6736                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6737
6738                         blocked_final = []
6739                         for atom in atoms:
6740                                 blocked_final.extend(final_db.match_pkgs(atom))
6741
6742                         if not blocked_initial and not blocked_final:
6743                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6744                                 self._blocker_parents.remove(blocker)
6745                                 # Discard any parents that don't have any more blockers.
6746                                 for pkg in parent_pkgs:
6747                                         self._irrelevant_blockers.add(blocker, pkg)
6748                                         if not self._blocker_parents.child_nodes(pkg):
6749                                                 self._blocker_parents.remove(pkg)
6750                                 continue
6751                         for parent in self._blocker_parents.parent_nodes(blocker):
6752                                 unresolved_blocks = False
6753                                 depends_on_order = set()
6754                                 for pkg in blocked_initial:
6755                                         if pkg.slot_atom == parent.slot_atom:
6756                                                 # TODO: Support blocks within slots in cases where it
6757                                                 # might make sense.  For example, a new version might
6758                                                 # require that the old version be uninstalled at build
6759                                                 # time.
6760                                                 continue
6761                                         if parent.installed:
6762                                                 # Two currently installed packages conflict with
6763                                                 # each other. Ignore this case since the damage
6764                                                 # is already done and this would be likely to
6765                                                 # confuse users if displayed like a normal blocker.
6766                                                 continue
6767
6768                                         self._blocked_pkgs.add(pkg, blocker)
6769
6770                                         if parent.operation == "merge":
6771                                                 # Maybe the blocked package can be replaced or simply
6772                                                 # unmerged to resolve this block.
6773                                                 depends_on_order.add((pkg, parent))
6774                                                 continue
6775                                         # None of the above blocker resolution techniques apply,
6776                                         # so apparently this one is unresolvable.
6777                                         unresolved_blocks = True
6778                                 for pkg in blocked_final:
6779                                         if pkg.slot_atom == parent.slot_atom:
6780                                                 # TODO: Support blocks within slots.
6781                                                 continue
6782                                         if parent.operation == "nomerge" and \
6783                                                 pkg.operation == "nomerge":
6784                                                 # This blocker will be handled the next time that a
6785                                                 # merge of either package is triggered.
6786                                                 continue
6787
6788                                         self._blocked_pkgs.add(pkg, blocker)
6789
6790                                         # Maybe the blocking package can be
6791                                         # unmerged to resolve this block.
6792                                         if parent.operation == "merge" and pkg.installed:
6793                                                 depends_on_order.add((pkg, parent))
6794                                                 continue
6795                                         elif parent.operation == "nomerge":
6796                                                 depends_on_order.add((parent, pkg))
6797                                                 continue
6798                                         # None of the above blocker resolution techniques apply,
6799                                         # so apparently this one is unresolvable.
6800                                         unresolved_blocks = True
6801
6802                                 # Make sure we don't unmerge any packages that have been pulled
6803                                 # into the graph.
6804                                 if not unresolved_blocks and depends_on_order:
6805                                         for inst_pkg, inst_task in depends_on_order:
6806                                                 if self.digraph.contains(inst_pkg) and \
6807                                                         self.digraph.parent_nodes(inst_pkg):
6808                                                         unresolved_blocks = True
6809                                                         break
6810
6811                                 if not unresolved_blocks and depends_on_order:
6812                                         for inst_pkg, inst_task in depends_on_order:
6813                                                 uninst_task = Package(built=inst_pkg.built,
6814                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6815                                                         metadata=inst_pkg.metadata,
6816                                                         operation="uninstall",
6817                                                         root_config=inst_pkg.root_config,
6818                                                         type_name=inst_pkg.type_name)
6819                                                 self._pkg_cache[uninst_task] = uninst_task
6820                                                 # Enforce correct merge order with a hard dep.
6821                                                 self.digraph.addnode(uninst_task, inst_task,
6822                                                         priority=BlockerDepPriority.instance)
6823                                                 # Count references to this blocker so that it can be
6824                                                 # invalidated after nodes referencing it have been
6825                                                 # merged.
6826                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6827                                 if not unresolved_blocks and not depends_on_order:
6828                                         self._irrelevant_blockers.add(blocker, parent)
6829                                         self._blocker_parents.remove_edge(blocker, parent)
6830                                         if not self._blocker_parents.parent_nodes(blocker):
6831                                                 self._blocker_parents.remove(blocker)
6832                                         if not self._blocker_parents.child_nodes(parent):
6833                                                 self._blocker_parents.remove(parent)
6834                                 if unresolved_blocks:
6835                                         self._unsolvable_blockers.add(blocker, parent)
6836
6837                 return True
6838
6839         def _accept_blocker_conflicts(self):
6840                 acceptable = False
6841                 for x in ("--buildpkgonly", "--fetchonly",
6842                         "--fetch-all-uri", "--nodeps"):
6843                         if x in self.myopts:
6844                                 acceptable = True
6845                                 break
6846                 return acceptable
6847
6848         def _merge_order_bias(self, mygraph):
6849                 """
6850                 For optimal leaf node selection, promote deep system runtime deps and
6851                 order nodes from highest to lowest overall reference count.
6852                 """
6853
6854                 node_info = {}
6855                 for node in mygraph.order:
6856                         node_info[node] = len(mygraph.parent_nodes(node))
6857                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6858
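                     # The comparator below sorts uninstall tasks last, deep system
                     # runtime deps first, and otherwise orders nodes by descending
                     # parent (reference) count.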
6859                 def cmp_merge_preference(node1, node2):
6860
6861                         if node1.operation == 'uninstall':
6862                                 if node2.operation == 'uninstall':
6863                                         return 0
6864                                 return 1
6865
6866                         if node2.operation == 'uninstall':
6867                                 if node1.operation == 'uninstall':
6868                                         return 0
6869                                 return -1
6870
6871                         node1_sys = node1 in deep_system_deps
6872                         node2_sys = node2 in deep_system_deps
6873                         if node1_sys != node2_sys:
6874                                 if node1_sys:
6875                                         return -1
6876                                 return 1
6877
6878                         return node_info[node2] - node_info[node1]
6879
6880                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6881
6882         def altlist(self, reversed=False):
6883
6884                 while self._serialized_tasks_cache is None:
6885                         self._resolve_conflicts()
6886                         try:
6887                                 self._serialized_tasks_cache, self._scheduler_graph = \
6888                                         self._serialize_tasks()
6889                         except self._serialize_tasks_retry:
6890                                 pass
6891
6892                 retlist = self._serialized_tasks_cache[:]
6893                 if reversed:
6894                         retlist.reverse()
6895                 return retlist
6896
6897         def schedulerGraph(self):
6898                 """
6899                 The scheduler graph is identical to the normal one except that
6900                 uninstall edges are reversed in specific cases that require
6901                 conflicting packages to be temporarily installed simultaneously.
6902                 This is intended for use by the Scheduler in its parallelization
6903                 logic. It ensures that temporary simultaneous installation of
6904                 conflicting packages is avoided when appropriate (especially for
6905                 !!atom blockers), but allowed in specific cases that require it.
6906
6907                 Note that this method calls break_refs() which alters the state of
6908                 internal Package instances such that this depgraph instance should
6909                 not be used to perform any more calculations.
6910                 """
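                     # In other words: after this call the graph's Package nodes have
                     # had break_refs() applied, so hand the result to the Scheduler
                     # and discard this depgraph instance rather than reusing it.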
6911                 if self._scheduler_graph is None:
6912                         self.altlist()
6913                 self.break_refs(self._scheduler_graph.order)
6914                 return self._scheduler_graph
6915
6916         def break_refs(self, nodes):
6917                 """
6918                 Take a mergelist like that returned from self.altlist() and
6919                 break any references that lead back to the depgraph. This is
6920                 useful if you want to hold references to packages without
6921                 also holding the depgraph on the heap.
6922                 """
6923                 for node in nodes:
6924                         if hasattr(node, "root_config"):
6925                                 # The FakeVartree references the _package_cache which
6926                                 # references the depgraph. So that Package instances don't
6927                                 # hold the depgraph and FakeVartree on the heap, replace
6928                                 # the RootConfig that references the FakeVartree with the
6929                                 # original RootConfig instance which references the actual
6930                                 # vartree.
6931                                 node.root_config = \
6932                                         self._trees_orig[node.root_config.root]["root_config"]
6933
6934         def _resolve_conflicts(self):
6935                 if not self._complete_graph():
6936                         raise self._unknown_internal_error()
6937
6938                 if not self.validate_blockers():
6939                         raise self._unknown_internal_error()
6940
6941                 if self._slot_collision_info:
6942                         self._process_slot_conflicts()
6943
6944         def _serialize_tasks(self):
6945
6946                 if "--debug" in self.myopts:
6947                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6948                         self.digraph.debug_print()
6949                         writemsg("\n", noiselevel=-1)
6950
6951                 scheduler_graph = self.digraph.copy()
6952                 mygraph = self.digraph.copy()
6953                 # Prune "nomerge" root nodes if nothing depends on them, since
6954                 # otherwise they slow down merge order calculation. Don't remove
6955                 # non-root nodes since they help optimize merge order in some cases
6956                 # such as revdep-rebuild.
6957                 removed_nodes = set()
6958                 while True:
6959                         for node in mygraph.root_nodes():
6960                                 if not isinstance(node, Package) or \
6961                                         node.installed or node.onlydeps:
6962                                         removed_nodes.add(node)
6963                         if removed_nodes:
6964                                 self.spinner.update()
6965                                 mygraph.difference_update(removed_nodes)
6966                         if not removed_nodes:
6967                                 break
6968                         removed_nodes.clear()
6969                 self._merge_order_bias(mygraph)
6970                 def cmp_circular_bias(n1, n2):
6971                         """
6972                         RDEPEND is stronger than PDEPEND and this function
6973                         measures such a strength bias within a circular
6974                         dependency relationship.
6975                         """
6976                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6977                                 ignore_priority=priority_range.ignore_medium_soft)
6978                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6979                                 ignore_priority=priority_range.ignore_medium_soft)
6980                         if n1_n2_medium == n2_n1_medium:
6981                                 return 0
6982                         elif n1_n2_medium:
6983                                 return 1
6984                         return -1
6985                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6986                 retlist = []
6987                 # Contains uninstall tasks that have been scheduled to
6988                 # occur after overlapping blockers have been installed.
6989                 scheduled_uninstalls = set()
6990                 # Contains any Uninstall tasks that have been ignored
6991                 # in order to avoid the circular deps code path. These
6992                 # correspond to blocker conflicts that could not be
6993                 # resolved.
6994                 ignored_uninstall_tasks = set()
6995                 have_uninstall_task = False
6996                 complete = "complete" in self.myparams
6997                 asap_nodes = []
6998
6999                 def get_nodes(**kwargs):
7000                         """
7001                         Returns leaf nodes excluding Uninstall instances
7002                         since those should be executed as late as possible.
7003                         """
7004                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7005                                 if isinstance(node, Package) and \
7006                                         (node.operation != "uninstall" or \
7007                                         node in scheduled_uninstalls)]
7008
7009                 # sys-apps/portage needs special treatment if ROOT="/"
7010                 running_root = self._running_root.root
7011                 from portage.const import PORTAGE_PACKAGE_ATOM
7012                 runtime_deps = InternalPackageSet(
7013                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7014                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7015                         PORTAGE_PACKAGE_ATOM)
7016                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7017                         PORTAGE_PACKAGE_ATOM)
7018
7019                 if running_portage:
7020                         running_portage = running_portage[0]
7021                 else:
7022                         running_portage = None
7023
7024                 if replacement_portage:
7025                         replacement_portage = replacement_portage[0]
7026                 else:
7027                         replacement_portage = None
7028
7029                 if replacement_portage == running_portage:
7030                         replacement_portage = None
7031
7032                 if replacement_portage is not None:
7033                         # update from running_portage to replacement_portage asap
7034                         asap_nodes.append(replacement_portage)
7035
7036                 if running_portage is not None:
7037                         try:
7038                                 portage_rdepend = self._select_atoms_highest_available(
7039                                         running_root, running_portage.metadata["RDEPEND"],
7040                                         myuse=running_portage.use.enabled,
7041                                         parent=running_portage, strict=False)
7042                         except portage.exception.InvalidDependString, e:
7043                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7044                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7045                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7046                                 del e
7047                                 portage_rdepend = []
7048                         runtime_deps.update(atom for atom in portage_rdepend \
7049                                 if not atom.startswith("!"))
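                     # runtime_deps now holds the running portage instance's RDEPEND
                     # atoms; the uninstall scheduling further below refuses to remove
                     # the only installed package satisfying any of these atoms on the
                     # running root, so portage itself is never left broken.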
7050
7051                 def gather_deps(ignore_priority, mergeable_nodes,
7052                         selected_nodes, node):
7053                         """
7054                         Recursively gather a group of nodes that RDEPEND on
7055                         each other. This ensures that they are merged as a group
7056                         and get their RDEPENDs satisfied as soon as possible.
7057                         """
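                             # Returns False if any member of the group is not currently
                             # mergeable at the given priority, or if the group would merge
                             # the portage replacement before its own RDEPENDs are in
                             # place; otherwise the whole group accumulates in
                             # selected_nodes.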
7058                         if node in selected_nodes:
7059                                 return True
7060                         if node not in mergeable_nodes:
7061                                 return False
7062                         if node == replacement_portage and \
7063                                 mygraph.child_nodes(node,
7064                                 ignore_priority=priority_range.ignore_medium_soft):
7065                                 # Make sure that portage always has all of its
7066                                 # RDEPENDs installed first.
7067                                 return False
7068                         selected_nodes.add(node)
7069                         for child in mygraph.child_nodes(node,
7070                                 ignore_priority=ignore_priority):
7071                                 if not gather_deps(ignore_priority,
7072                                         mergeable_nodes, selected_nodes, child):
7073                                         return False
7074                         return True
7075
7076                 def ignore_uninst_or_med(priority):
7077                         if priority is BlockerDepPriority.instance:
7078                                 return True
7079                         return priority_range.ignore_medium(priority)
7080
7081                 def ignore_uninst_or_med_soft(priority):
7082                         if priority is BlockerDepPriority.instance:
7083                                 return True
7084                         return priority_range.ignore_medium_soft(priority)
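                     # The two helpers above extend the priority-range filters so that
                     # hard BlockerDepPriority edges (uninstall ordering) are ignored
                     # as well when collecting mergeable leaf nodes for the uninstall
                     # scheduling logic further below.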
7085
7086                 tree_mode = "--tree" in self.myopts
7087                 # Tracks whether or not the current iteration should prefer asap_nodes
7088                 # if available.  This is set to False when the previous iteration
7089                 # failed to select any nodes.  It is reset whenever nodes are
7090                 # successfully selected.
7091                 prefer_asap = True
7092
7093                 # Controls whether or not the current iteration should drop edges that
7094                 # are "satisfied" by installed packages, in order to solve circular
7095                 # dependencies. The deep runtime dependencies of installed packages are
7096                 # not checked in this case (bug #199856), so it must be avoided
7097                 # whenever possible.
7098                 drop_satisfied = False
7099
7100                 # State of variables for successive iterations that loosen the
7101                 # criteria for node selection.
7102                 #
7103                 # iteration   prefer_asap   drop_satisfied
7104                 # 1           True          False
7105                 # 2           False         False
7106                 # 3           False         True
7107                 #
7108                 # If no nodes are selected on the last iteration, it is due to
7109                 # unresolved blockers or circular dependencies.
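                     # The loop below implements those iterations: it first tries asap
                     # nodes and ordinary leaf nodes, then falls back to gather_deps()
                     # for circular runtime-dependency groups, and finally schedules
                     # uninstall tasks from myblocker_uninstalls; per the table above,
                     # the criteria are only loosened after an iteration selects
                     # nothing.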
7110
7111                 while not mygraph.empty():
7112                         self.spinner.update()
7113                         selected_nodes = None
7114                         ignore_priority = None
7115                         if drop_satisfied or (prefer_asap and asap_nodes):
7116                                 priority_range = DepPrioritySatisfiedRange
7117                         else:
7118                                 priority_range = DepPriorityNormalRange
7119                         if prefer_asap and asap_nodes:
7120                                 # ASAP nodes are merged before their soft deps. Go ahead and
7121                                 # select root nodes here if necessary, since it's typical for
7122                                 # the parent to have been removed from the graph already.
7123                                 asap_nodes = [node for node in asap_nodes \
7124                                         if mygraph.contains(node)]
7125                                 for node in asap_nodes:
7126                                         if not mygraph.child_nodes(node,
7127                                                 ignore_priority=priority_range.ignore_soft):
7128                                                 selected_nodes = [node]
7129                                                 asap_nodes.remove(node)
7130                                                 break
7131                         if not selected_nodes and \
7132                                 not (prefer_asap and asap_nodes):
7133                                 for i in xrange(priority_range.NONE,
7134                                         priority_range.MEDIUM_SOFT + 1):
7135                                         ignore_priority = priority_range.ignore_priority[i]
7136                                         nodes = get_nodes(ignore_priority=ignore_priority)
7137                                         if nodes:
7138                                                 # If there is a mix of uninstall nodes with other
7139                                                 # types, save the uninstall nodes for later since
7140                                                 # sometimes a merge node will render an uninstall
7141                                                 # node unnecessary (due to occupying the same slot),
7142                                                 # and we want to avoid executing a separate uninstall
7143                                                 # task in that case.
7144                                                 if len(nodes) > 1:
7145                                                         good_uninstalls = []
7146                                                         with_some_uninstalls_excluded = []
7147                                                         for node in nodes:
7148                                                                 if node.operation == "uninstall":
7149                                                                         slot_node = self.mydbapi[node.root
7150                                                                                 ].match_pkgs(node.slot_atom)
7151                                                                         if slot_node and \
7152                                                                                 slot_node[0].operation == "merge":
7153                                                                                 continue
7154                                                                         good_uninstalls.append(node)
7155                                                                 with_some_uninstalls_excluded.append(node)
7156                                                         if good_uninstalls:
7157                                                                 nodes = good_uninstalls
7158                                                         elif with_some_uninstalls_excluded:
7159                                                                 nodes = with_some_uninstalls_excluded
7160                                                         else:
7161                                                                 nodes = nodes
7162
7163                                                 if ignore_priority is None and not tree_mode:
7164                                                         # Greedily pop all of these nodes since no
7165                                                         # relationship has been ignored. This optimization
7166                                                         # destroys --tree output, so it's disabled in tree
7167                                                         # mode.
7168                                                         selected_nodes = nodes
7169                                                 else:
7170                                                         # For optimal merge order:
7171                                                         #  * Only pop one node.
7172                                                         #  * Removing a root node (node without a parent)
7173                                                         #    will not produce a leaf node, so avoid it.
7174                                                         #  * It's normal for a selected uninstall to be a
7175                                                         #    root node, so don't check them for parents.
7176                                                         for node in nodes:
7177                                                                 if node.operation == "uninstall" or \
7178                                                                         mygraph.parent_nodes(node):
7179                                                                         selected_nodes = [node]
7180                                                                         break
7181
7182                                                 if selected_nodes:
7183                                                         break
7184
7185                         if not selected_nodes:
7186                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7187                                 if nodes:
7188                                         mergeable_nodes = set(nodes)
7189                                         if prefer_asap and asap_nodes:
7190                                                 nodes = asap_nodes
7191                                         for i in xrange(priority_range.SOFT,
7192                                                 priority_range.MEDIUM_SOFT + 1):
7193                                                 ignore_priority = priority_range.ignore_priority[i]
7194                                                 for node in nodes:
7195                                                         if not mygraph.parent_nodes(node):
7196                                                                 continue
7197                                                         selected_nodes = set()
7198                                                         if gather_deps(ignore_priority,
7199                                                                 mergeable_nodes, selected_nodes, node):
7200                                                                 break
7201                                                         else:
7202                                                                 selected_nodes = None
7203                                                 if selected_nodes:
7204                                                         break
7205
7206                                         if prefer_asap and asap_nodes and not selected_nodes:
7207                                                 # We failed to find any asap nodes to merge, so ignore
7208                                                 # them for the next iteration.
7209                                                 prefer_asap = False
7210                                                 continue
7211
7212                         if selected_nodes and ignore_priority is not None:
7213                                 # Try to merge ignored medium_soft deps as soon as possible
7214                                 # if they're not satisfied by installed packages.
7215                                 for node in selected_nodes:
7216                                         children = set(mygraph.child_nodes(node))
7217                                         soft = children.difference(
7218                                                 mygraph.child_nodes(node,
7219                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7220                                         medium_soft = children.difference(
7221                                                 mygraph.child_nodes(node,
7222                                                         ignore_priority = \
7223                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7224                                         medium_soft.difference_update(soft)
7225                                         for child in medium_soft:
7226                                                 if child in selected_nodes:
7227                                                         continue
7228                                                 if child in asap_nodes:
7229                                                         continue
7230                                                 asap_nodes.append(child)
7231
7232                         if selected_nodes and len(selected_nodes) > 1:
7233                                 if not isinstance(selected_nodes, list):
7234                                         selected_nodes = list(selected_nodes)
7235                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7236
7237                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7238                                 # An Uninstall task needs to be executed in order to
7239                                 # avoid conflict if possible.
7240
7241                                 if drop_satisfied:
7242                                         priority_range = DepPrioritySatisfiedRange
7243                                 else:
7244                                         priority_range = DepPriorityNormalRange
7245
7246                                 mergeable_nodes = get_nodes(
7247                                         ignore_priority=ignore_uninst_or_med)
7248
7249                                 min_parent_deps = None
7250                                 uninst_task = None
7251                                 for task in myblocker_uninstalls.leaf_nodes():
7252                                         # Do some sanity checks so that system or world packages
7253                                         # don't get uninstalled inappropriately here (only really
7254                                         # necessary when --complete-graph has not been enabled).
7255
7256                                         if task in ignored_uninstall_tasks:
7257                                                 continue
7258
7259                                         if task in scheduled_uninstalls:
7260                                                 # It's been scheduled but it hasn't
7261                                                 # been executed yet due to dependence
7262                                                 # on installation of blocking packages.
7263                                                 continue
7264
7265                                         root_config = self.roots[task.root]
7266                                         inst_pkg = self._pkg_cache[
7267                                                 ("installed", task.root, task.cpv, "nomerge")]
7268
7269                                         if self.digraph.contains(inst_pkg):
7270                                                 continue
7271
7272                                         forbid_overlap = False
7273                                         heuristic_overlap = False
7274                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7275                                                 if blocker.eapi in ("0", "1"):
7276                                                         heuristic_overlap = True
7277                                                 elif blocker.atom.blocker.overlap.forbid:
7278                                                         forbid_overlap = True
7279                                                         break
7280                                         if forbid_overlap and running_root == task.root:
7281                                                 continue
7282
7283                                         if heuristic_overlap and running_root == task.root:
7284                                                 # Never uninstall sys-apps/portage or its essential
7285                                                 # dependencies, except through replacement.
7286                                                 try:
7287                                                         runtime_dep_atoms = \
7288                                                                 list(runtime_deps.iterAtomsForPackage(task))
7289                                                 except portage.exception.InvalidDependString, e:
7290                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7291                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7292                                                                 (task.root, task.cpv, e), noiselevel=-1)
7293                                                         del e
7294                                                         continue
7295
7296                                                 # Don't uninstall a runtime dep if it appears
7297                                                 # to be the only suitable one installed.
7298                                                 skip = False
7299                                                 vardb = root_config.trees["vartree"].dbapi
7300                                                 for atom in runtime_dep_atoms:
7301                                                         other_version = None
7302                                                         for pkg in vardb.match_pkgs(atom):
7303                                                                 if pkg.cpv == task.cpv and \
7304                                                                         pkg.metadata["COUNTER"] == \
7305                                                                         task.metadata["COUNTER"]:
7306                                                                         continue
7307                                                                 other_version = pkg
7308                                                                 break
7309                                                         if other_version is None:
7310                                                                 skip = True
7311                                                                 break
7312                                                 if skip:
7313                                                         continue
7314
7315                                                 # For packages in the system set, don't take
7316                                                 # any chances. If the conflict can't be resolved
7317                                                 # by a normal replacement operation then abort.
7318                                                 skip = False
7319                                                 try:
7320                                                         for atom in root_config.sets[
7321                                                                 "system"].iterAtomsForPackage(task):
7322                                                                 skip = True
7323                                                                 break
7324                                                 except portage.exception.InvalidDependString, e:
7325                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7326                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7327                                                                 (task.root, task.cpv, e), noiselevel=-1)
7328                                                         del e
7329                                                         skip = True
7330                                                 if skip:
7331                                                         continue
7332
7333                                         # Note that the world check isn't always
7334                                         # necessary since self._complete_graph() will
7335                                         # add all packages from the system and world sets to the
7336                                         # graph. This just allows unresolved conflicts to be
7337                                         # detected as early as possible, which makes it possible
7338                                         # to avoid calling self._complete_graph() when it is
7339                                         # unnecessary due to blockers triggering an abort.
7340                                         if not complete:
7341                                                 # For packages in the world set, go ahead and uninstall
7342                                                 # when necessary, as long as the atom will be satisfied
7343                                                 # in the final state.
7344                                                 graph_db = self.mydbapi[task.root]
7345                                                 skip = False
7346                                                 try:
7347                                                         for atom in root_config.sets[
7348                                                                 "world"].iterAtomsForPackage(task):
7349                                                                 satisfied = False
7350                                                                 for pkg in graph_db.match_pkgs(atom):
7351                                                                         if pkg == inst_pkg:
7352                                                                                 continue
7353                                                                         satisfied = True
7354                                                                         break
7355                                                                 if not satisfied:
7356                                                                         skip = True
7357                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7358                                                                         break
7359                                                 except portage.exception.InvalidDependString, e:
7360                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7361                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7362                                                                 (task.root, task.cpv, e), noiselevel=-1)
7363                                                         del e
7364                                                         skip = True
7365                                                 if skip:
7366                                                         continue
7367
7368                                         # Check the deps of parent nodes to ensure that
7369                                         # the chosen task produces a leaf node. Maybe
7370                                         # this can be optimized some more to make the
7371                                         # best possible choice, but the current algorithm
7372                                         # is simple and should be near optimal for most
7373                                         # common cases.
7374                                         mergeable_parent = False
7375                                         parent_deps = set()
7376                                         for parent in mygraph.parent_nodes(task):
7377                                                 parent_deps.update(mygraph.child_nodes(parent,
7378                                                         ignore_priority=priority_range.ignore_medium_soft))
7379                                                 if parent in mergeable_nodes and \
7380                                                         gather_deps(ignore_uninst_or_med_soft,
7381                                                         mergeable_nodes, set(), parent):
7382                                                         mergeable_parent = True
7383
7384                                         if not mergeable_parent:
7385                                                 continue
7386
7387                                         parent_deps.remove(task)
7388                                         if min_parent_deps is None or \
7389                                                 len(parent_deps) < min_parent_deps:
7390                                                 min_parent_deps = len(parent_deps)
7391                                                 uninst_task = task
7392
7393                                 if uninst_task is not None:
7394                                         # The uninstall is performed only after blocking
7395                                         # packages have been merged on top of it. File
7396                                         # collisions between blocking packages are detected
7397                                         # and removed from the list of files to be uninstalled.
7398                                         scheduled_uninstalls.add(uninst_task)
7399                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7400
7401                                         # Reverse the parent -> uninstall edges since we want
7402                                         # to do the uninstall after blocking packages have
7403                                         # been merged on top of it.
7404                                         mygraph.remove(uninst_task)
7405                                         for blocked_pkg in parent_nodes:
7406                                                 mygraph.add(blocked_pkg, uninst_task,
7407                                                         priority=BlockerDepPriority.instance)
7408                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7409                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7410                                                         priority=BlockerDepPriority.instance)
7411
7412                                         # Reset the state variables for leaf node selection and
7413                                         # continue trying to select leaf nodes.
7414                                         prefer_asap = True
7415                                         drop_satisfied = False
7416                                         continue
7417
7418                         if not selected_nodes:
7419                                 # Only select root nodes as a last resort. This case should
7420                                 # only trigger when the graph is nearly empty and the only
7421                                 # remaining nodes are isolated (no parents or children). Since
7422                                 # the nodes must be isolated, ignore_priority is not needed.
7423                                 selected_nodes = get_nodes()
7424
7425                         if not selected_nodes and not drop_satisfied:
7426                                 drop_satisfied = True
7427                                 continue
7428
7429                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7430                                 # If possible, drop an uninstall task here in order to avoid
7431                                 # the circular deps code path. The corresponding blocker will
7432                                 # still be counted as an unresolved conflict.
7433                                 uninst_task = None
7434                                 for node in myblocker_uninstalls.leaf_nodes():
7435                                         try:
7436                                                 mygraph.remove(node)
7437                                         except KeyError:
7438                                                 pass
7439                                         else:
7440                                                 uninst_task = node
7441                                                 ignored_uninstall_tasks.add(node)
7442                                                 break
7443
7444                                 if uninst_task is not None:
7445                                         # Reset the state variables for leaf node selection and
7446                                         # continue trying to select leaf nodes.
7447                                         prefer_asap = True
7448                                         drop_satisfied = False
7449                                         continue
7450
7451                         if not selected_nodes:
7452                                 self._circular_deps_for_display = mygraph
7453                                 raise self._unknown_internal_error()
7454
7455                         # At this point, we've succeeded in selecting one or more nodes, so
7456                         # reset state variables for leaf node selection.
7457                         prefer_asap = True
7458                         drop_satisfied = False
7459
7460                         mygraph.difference_update(selected_nodes)
7461
7462                         for node in selected_nodes:
7463                                 if isinstance(node, Package) and \
7464                                         node.operation == "nomerge":
7465                                         continue
7466
7467                                 # Handle interactions between blockers
7468                                 # and uninstallation tasks.
7469                                 solved_blockers = set()
7470                                 uninst_task = None
7471                                 if isinstance(node, Package) and \
7472                                         "uninstall" == node.operation:
7473                                         have_uninstall_task = True
7474                                         uninst_task = node
7475                                 else:
7476                                         vardb = self.trees[node.root]["vartree"].dbapi
7477                                         previous_cpv = vardb.match(node.slot_atom)
7478                                         if previous_cpv:
7479                                                 # The package will be replaced by this one, so remove
7480                                                 # the corresponding Uninstall task if necessary.
7481                                                 previous_cpv = previous_cpv[0]
7482                                                 uninst_task = \
7483                                                         ("installed", node.root, previous_cpv, "uninstall")
7484                                                 try:
7485                                                         mygraph.remove(uninst_task)
7486                                                 except KeyError:
7487                                                         pass
7488
7489                                 if uninst_task is not None and \
7490                                         uninst_task not in ignored_uninstall_tasks and \
7491                                         myblocker_uninstalls.contains(uninst_task):
7492                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7493                                         myblocker_uninstalls.remove(uninst_task)
7494                                         # Discard any blockers that this Uninstall solves.
7495                                         for blocker in blocker_nodes:
7496                                                 if not myblocker_uninstalls.child_nodes(blocker):
7497                                                         myblocker_uninstalls.remove(blocker)
7498                                                         solved_blockers.add(blocker)
7499
7500                                 retlist.append(node)
7501
7502                                 if (isinstance(node, Package) and \
7503                                         "uninstall" == node.operation) or \
7504                                         (uninst_task is not None and \
7505                                         uninst_task in scheduled_uninstalls):
7506                                         # Include satisfied blockers in the merge list
7507                                         # since the user might be interested, and because
7508                                         # it serves as an indicator that blocking packages
7509                                         # will be temporarily installed simultaneously.
7510                                         for blocker in solved_blockers:
7511                                                 retlist.append(Blocker(atom=blocker.atom,
7512                                                         root=blocker.root, eapi=blocker.eapi,
7513                                                         satisfied=True))
7514
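                     # Collect blockers that remain unresolved: leaves of the
                     # unsolvable-blocker graph plus blockers whose Uninstall
                     # tasks were never executed.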
7515                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7516                 for node in myblocker_uninstalls.root_nodes():
7517                         unsolvable_blockers.add(node)
7518
7519                 for blocker in unsolvable_blockers:
7520                         retlist.append(blocker)
7521
7522                 # If any Uninstall tasks need to be executed in order
7523                 # to avoid a conflict, complete the graph with any
7524                 # dependencies that may have been initially
7525                 # neglected (to ensure that unsafe Uninstall tasks
7526                 # are properly identified and blocked from execution).
7527                 if have_uninstall_task and \
7528                         not complete and \
7529                         not unsolvable_blockers:
7530                         self.myparams.add("complete")
7531                         raise self._serialize_tasks_retry("")
7532
7533                 if unsolvable_blockers and \
7534                         not self._accept_blocker_conflicts():
7535                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7536                         self._serialized_tasks_cache = retlist[:]
7537                         self._scheduler_graph = scheduler_graph
7538                         raise self._unknown_internal_error()
7539
7540                 if self._slot_collision_info and \
7541                         not self._accept_blocker_conflicts():
7542                         self._serialized_tasks_cache = retlist[:]
7543                         self._scheduler_graph = scheduler_graph
7544                         raise self._unknown_internal_error()
7545
7546                 return retlist, scheduler_graph
7547
7548         def _show_circular_deps(self, mygraph):
7549                 # No leaf nodes are available, so we have a circular
7550                 # dependency panic situation.  Reduce the noise level to a
7551                 # minimum via repeated elimination of root nodes since they
7552                 # have no parents and thus cannot be part of a cycle.
7553                 while True:
7554                         root_nodes = mygraph.root_nodes(
7555                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7556                         if not root_nodes:
7557                                 break
7558                         mygraph.difference_update(root_nodes)
7559                 # Display the USE flags that are enabled on nodes that are part
7560                 # of dependency cycles in case that helps the user decide to
7561                 # disable some of them.
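                     # Peel off leaf nodes repeatedly to order the remaining
                     # cycle members (falling back to an arbitrary node when no
                     # leaf exists), then reverse the result for display.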
7562                 display_order = []
7563                 tempgraph = mygraph.copy()
7564                 while not tempgraph.empty():
7565                         nodes = tempgraph.leaf_nodes()
7566                         if not nodes:
7567                                 node = tempgraph.order[0]
7568                         else:
7569                                 node = nodes[0]
7570                         display_order.append(node)
7571                         tempgraph.remove(node)
7572                 display_order.reverse()
7573                 self.myopts.pop("--quiet", None)
7574                 self.myopts.pop("--verbose", None)
7575                 self.myopts["--tree"] = True
7576                 portage.writemsg("\n\n", noiselevel=-1)
7577                 self.display(display_order)
7578                 prefix = colorize("BAD", " * ")
7579                 portage.writemsg("\n", noiselevel=-1)
7580                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7581                         noiselevel=-1)
7582                 portage.writemsg("\n", noiselevel=-1)
7583                 mygraph.debug_print()
7584                 portage.writemsg("\n", noiselevel=-1)
7585                 portage.writemsg(prefix + "Note that circular dependencies " + \
7586                         "can often be avoided by temporarily\n", noiselevel=-1)
7587                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7588                         "optional dependencies.\n", noiselevel=-1)
7589
7590         def _show_merge_list(self):
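                     # Re-display the cached merge list only if it has not
                     # already been shown (in forward or reverse order).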
7591                 if self._serialized_tasks_cache is not None and \
7592                         not (self._displayed_list and \
7593                         (self._displayed_list == self._serialized_tasks_cache or \
7594                         self._displayed_list == \
7595                                 list(reversed(self._serialized_tasks_cache)))):
7596                         display_list = self._serialized_tasks_cache[:]
7597                         if "--tree" in self.myopts:
7598                                 display_list.reverse()
7599                         self.display(display_list)
7600
7601         def _show_unsatisfied_blockers(self, blockers):
7602                 self._show_merge_list()
7603                 msg = "Error: The above package list contains " + \
7604                         "packages which cannot be installed " + \
7605                         "at the same time on the same system."
7606                 prefix = colorize("BAD", " * ")
7607                 from textwrap import wrap
7608                 portage.writemsg("\n", noiselevel=-1)
7609                 for line in wrap(msg, 70):
7610                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7611
7612                 # Display the conflicting packages along with the packages
7613                 # that pulled them in. This is helpful for troubleshooting
7614                 # cases in which blockers don't solve automatically and
7615                 # the reasons are not apparent from the normal merge list
7616                 # display.
7617
7618                 conflict_pkgs = {}
7619                 for blocker in blockers:
7620                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7621                                 self._blocker_parents.parent_nodes(blocker)):
7622                                 parent_atoms = self._parent_atoms.get(pkg)
7623                                 if not parent_atoms:
7624                                         atom = self._blocked_world_pkgs.get(pkg)
7625                                         if atom is not None:
7626                                                 parent_atoms = set([("@world", atom)])
7627                                 if parent_atoms:
7628                                         conflict_pkgs[pkg] = parent_atoms
7629
7630                 if conflict_pkgs:
7631                         # Reduce noise by pruning packages that are only
7632                         # pulled in by other conflict packages.
7633                         pruned_pkgs = set()
7634                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7635                                 relevant_parent = False
7636                                 for parent, atom in parent_atoms:
7637                                         if parent not in conflict_pkgs:
7638                                                 relevant_parent = True
7639                                                 break
7640                                 if not relevant_parent:
7641                                         pruned_pkgs.add(pkg)
7642                         for pkg in pruned_pkgs:
7643                                 del conflict_pkgs[pkg]
7644
7645                 if conflict_pkgs:
7646                         msg = []
7647                         msg.append("\n")
7648                         indent = "  "
7649                         # Max number of parents shown, to avoid flooding the display.
7650                         max_parents = 3
7651                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7652
7653                                 pruned_list = set()
7654
7655                                 # Prefer packages that are not directly involved in a conflict.
7656                                 for parent_atom in parent_atoms:
7657                                         if len(pruned_list) >= max_parents:
7658                                                 break
7659                                         parent, atom = parent_atom
7660                                         if parent not in conflict_pkgs:
7661                                                 pruned_list.add(parent_atom)
7662
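                                     # If there is still room, fall back to parents that are
                                     # themselves part of the conflict.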
7663                                 for parent_atom in parent_atoms:
7664                                         if len(pruned_list) >= max_parents:
7665                                                 break
7666                                         pruned_list.add(parent_atom)
7667
7668                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7669                                 msg.append(indent + "%s pulled in by\n" % pkg)
7670
7671                                 for parent_atom in pruned_list:
7672                                         parent, atom = parent_atom
7673                                         msg.append(2*indent)
7674                                         if isinstance(parent,
7675                                                 (PackageArg, AtomArg)):
7676                                                 # For PackageArg and AtomArg types, it's
7677                                                 # redundant to display the atom attribute.
7678                                                 msg.append(str(parent))
7679                                         else:
7680                                                 # Display the specific atom from SetArg or
7681                                                 # Package types.
7682                                                 msg.append("%s required by %s" % (atom, parent))
7683                                         msg.append("\n")
7684
7685                                 if omitted_parents:
7686                                         msg.append(2*indent)
7687                                         msg.append("(and %d more)\n" % omitted_parents)
7688
7689                                 msg.append("\n")
7690
7691                         sys.stderr.write("".join(msg))
7692                         sys.stderr.flush()
7693
7694                 if "--quiet" not in self.myopts:
7695                         show_blocker_docs_link()
7696
7697         def display(self, mylist, favorites=[], verbosity=None):
7698
7699                 # This is used to prevent display_problems() from
7700                 # redundantly displaying this exact same merge list
7701                 # again via _show_merge_list().
7702                 self._displayed_list = mylist
7703
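                     # Map command line options to a verbosity level:
                     # --quiet -> 1, --verbose -> 3, otherwise 2.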
7704                 if verbosity is None:
7705                         verbosity = ("--quiet" in self.myopts and 1 or \
7706                                 "--verbose" in self.myopts and 3 or 2)
7707                 favorites_set = InternalPackageSet(favorites)
7708                 oneshot = "--oneshot" in self.myopts or \
7709                         "--onlydeps" in self.myopts
7710                 columns = "--columns" in self.myopts
7711                 changelogs = []
7712                 p = []
7713                 blockers = []
7714
7715                 counters = PackageCounters()
7716
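                     # create_use_string() builds the colorized USE="..." part
                     # of a merge list line; in quiet mode it is a no-op.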
7717                 if verbosity == 1 and "--verbose" not in self.myopts:
7718                         def create_use_string(*args):
7719                                 return ""
7720                 else:
7721                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7722                                 old_iuse, old_use,
7723                                 is_new, reinst_flags,
7724                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7725                                 alphabetical=("--alphabetical" in self.myopts)):
7726                                 enabled = []
7727                                 if alphabetical:
7728                                         disabled = enabled
7729                                         removed = enabled
7730                                 else:
7731                                         disabled = []
7732                                         removed = []
7733                                 cur_iuse = set(cur_iuse)
7734                                 enabled_flags = cur_iuse.intersection(cur_use)
7735                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7736                                 any_iuse = cur_iuse.union(old_iuse)
7737                                 any_iuse = list(any_iuse)
7738                                 any_iuse.sort()
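                                     # Flag string legend: '*' marks a change in the flag's
                                     # enabled state relative to the installed version, '%'
                                     # marks a flag added to or removed from IUSE, and
                                     # parentheses mark flags forced via use.mask/use.force.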
7739                                 for flag in any_iuse:
7740                                         flag_str = None
7741                                         isEnabled = False
7742                                         reinst_flag = reinst_flags and flag in reinst_flags
7743                                         if flag in enabled_flags:
7744                                                 isEnabled = True
7745                                                 if is_new or flag in old_use and \
7746                                                         (all_flags or reinst_flag):
7747                                                         flag_str = red(flag)
7748                                                 elif flag not in old_iuse:
7749                                                         flag_str = yellow(flag) + "%*"
7750                                                 elif flag not in old_use:
7751                                                         flag_str = green(flag) + "*"
7752                                         elif flag in removed_iuse:
7753                                                 if all_flags or reinst_flag:
7754                                                         flag_str = yellow("-" + flag) + "%"
7755                                                         if flag in old_use:
7756                                                                 flag_str += "*"
7757                                                         flag_str = "(" + flag_str + ")"
7758                                                         removed.append(flag_str)
7759                                                 continue
7760                                         else:
7761                                                 if is_new or flag in old_iuse and \
7762                                                         flag not in old_use and \
7763                                                         (all_flags or reinst_flag):
7764                                                         flag_str = blue("-" + flag)
7765                                                 elif flag not in old_iuse:
7766                                                         flag_str = yellow("-" + flag)
7767                                                         if flag not in iuse_forced:
7768                                                                 flag_str += "%"
7769                                                 elif flag in old_use:
7770                                                         flag_str = green("-" + flag) + "*"
7771                                         if flag_str:
7772                                                 if flag in iuse_forced:
7773                                                         flag_str = "(" + flag_str + ")"
7774                                                 if isEnabled:
7775                                                         enabled.append(flag_str)
7776                                                 else:
7777                                                         disabled.append(flag_str)
7778
7779                                 if alphabetical:
7780                                         ret = " ".join(enabled)
7781                                 else:
7782                                         ret = " ".join(enabled + disabled + removed)
7783                                 if ret:
7784                                         ret = '%s="%s" ' % (name, ret)
7785                                 return ret
7786
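                     # RepoDisplay maps each repository (overlay) to the index
                     # used for the "[N]" repo annotations in verbose output.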
7787                 repo_display = RepoDisplay(self.roots)
7788
7789                 tree_nodes = []
7790                 display_list = []
7791                 mygraph = self.digraph.copy()
7792
7793                 # If there are any Uninstall instances, add the corresponding
7794                 # blockers to the digraph (useful for --tree display).
7795
7796                 executed_uninstalls = set(node for node in mylist \
7797                         if isinstance(node, Package) and node.operation == "unmerge")
7798
7799                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7800                         uninstall_parents = \
7801                                 self._blocker_uninstalls.parent_nodes(uninstall)
7802                         if not uninstall_parents:
7803                                 continue
7804
7805                         # Remove the corresponding "nomerge" node and substitute
7806                         # the Uninstall node.
7807                         inst_pkg = self._pkg_cache[
7808                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7809                         try:
7810                                 mygraph.remove(inst_pkg)
7811                         except KeyError:
7812                                 pass
7813
7814                         try:
7815                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7816                         except KeyError:
7817                                 inst_pkg_blockers = []
7818
7819                         # Break the Package -> Uninstall edges.
7820                         mygraph.remove(uninstall)
7821
7822                         # Resolution of a package's blockers
7823                         # depends on its own uninstallation.
7824                         for blocker in inst_pkg_blockers:
7825                                 mygraph.add(uninstall, blocker)
7826
7827                         # Expand Package -> Uninstall edges into
7828                         # Package -> Blocker -> Uninstall edges.
7829                         for blocker in uninstall_parents:
7830                                 mygraph.add(uninstall, blocker)
7831                                 for parent in self._blocker_parents.parent_nodes(blocker):
7832                                         if parent != inst_pkg:
7833                                                 mygraph.add(blocker, parent)
7834
7835                         # If the uninstall task did not need to be executed because
7836                         # of an upgrade, display Blocker -> Upgrade edges since the
7837                         # corresponding Blocker -> Uninstall edges will not be shown.
7838                         upgrade_node = \
7839                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7840                         if upgrade_node is not None and \
7841                                 uninstall not in executed_uninstalls:
7842                                 for blocker in uninstall_parents:
7843                                         mygraph.add(upgrade_node, blocker)
7844
7845                 unsatisfied_blockers = []
7846                 i = 0
7847                 depth = 0
7848                 shown_edges = set()
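                     # Build the display list. With --tree, compute a depth for
                     # each node by following parent edges so the list renders
                     # as an indented dependency tree; otherwise show it flat.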
7849                 for x in mylist:
7850                         if isinstance(x, Blocker) and not x.satisfied:
7851                                 unsatisfied_blockers.append(x)
7852                                 continue
7853                         graph_key = x
7854                         if "--tree" in self.myopts:
7855                                 depth = len(tree_nodes)
7856                                 while depth and graph_key not in \
7857                                         mygraph.child_nodes(tree_nodes[depth-1]):
7858                                                 depth -= 1
7859                                 if depth:
7860                                         tree_nodes = tree_nodes[:depth]
7861                                         tree_nodes.append(graph_key)
7862                                         display_list.append((x, depth, True))
7863                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7864                                 else:
7865                                         traversed_nodes = set() # prevent endless cycles
7866                                         traversed_nodes.add(graph_key)
7867                                         def add_parents(current_node, ordered):
7868                                                 parent_nodes = None
7869                                                 # Do not traverse to parents if this node is
7870                                                 # an argument or a direct member of a set that has
7871                                                 # been specified as an argument (system or world).
7872                                                 if current_node not in self._set_nodes:
7873                                                         parent_nodes = mygraph.parent_nodes(current_node)
7874                                                 if parent_nodes:
7875                                                         child_nodes = set(mygraph.child_nodes(current_node))
7876                                                         selected_parent = None
7877                                                         # First, try to avoid a direct cycle.
7878                                                         for node in parent_nodes:
7879                                                                 if not isinstance(node, (Blocker, Package)):
7880                                                                         continue
7881                                                                 if node not in traversed_nodes and \
7882                                                                         node not in child_nodes:
7883                                                                         edge = (current_node, node)
7884                                                                         if edge in shown_edges:
7885                                                                                 continue
7886                                                                         selected_parent = node
7887                                                                         break
7888                                                         if not selected_parent:
7889                                                                 # A direct cycle is unavoidable.
7890                                                                 for node in parent_nodes:
7891                                                                         if not isinstance(node, (Blocker, Package)):
7892                                                                                 continue
7893                                                                         if node not in traversed_nodes:
7894                                                                                 edge = (current_node, node)
7895                                                                                 if edge in shown_edges:
7896                                                                                         continue
7897                                                                                 selected_parent = node
7898                                                                                 break
7899                                                         if selected_parent:
7900                                                                 shown_edges.add((current_node, selected_parent))
7901                                                                 traversed_nodes.add(selected_parent)
7902                                                                 add_parents(selected_parent, False)
7903                                                 display_list.append((current_node,
7904                                                         len(tree_nodes), ordered))
7905                                                 tree_nodes.append(current_node)
7906                                         tree_nodes = []
7907                                         add_parents(graph_key, True)
7908                         else:
7909                                 display_list.append((x, depth, True))
7910                 mylist = display_list
7911                 for x in unsatisfied_blockers:
7912                         mylist.append((x, 0, True))
7913
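                     # Walk the list backwards, dropping consecutive duplicates
                     # and redundant "nomerge" entries that were only added to
                     # fill in the tree display.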
7914                 last_merge_depth = 0
7915                 for i in xrange(len(mylist)-1,-1,-1):
7916                         graph_key, depth, ordered = mylist[i]
7917                         if not ordered and depth == 0 and i > 0 \
7918                                 and graph_key == mylist[i-1][0] and \
7919                                 mylist[i-1][1] == 0:
7920                                 # An ordered node got a consecutive duplicate when the tree was
7921                                 # being filled in.
7922                                 del mylist[i]
7923                                 continue
7924                         if ordered and graph_key[-1] != "nomerge":
7925                                 last_merge_depth = depth
7926                                 continue
7927                         if depth >= last_merge_depth or \
7928                                 i < len(mylist) - 1 and \
7929                                 depth >= mylist[i+1][1]:
7930                                         del mylist[i]
7931
7932                 from portage import flatten
7933                 from portage.dep import use_reduce, paren_reduce
7934                 # List of files to fetch; avoids counting the same file twice
7935                 # in the size display (verbose mode).
7936                 myfetchlist = []
7937
7938                 # Use this set to detect when all the "repoadd" strings are "[0]"
7939                 # and disable the entire repo display in this case.
7940                 repoadd_set = set()
7941
7942                 for mylist_index in xrange(len(mylist)):
7943                         x, depth, ordered = mylist[mylist_index]
7944                         pkg_type = x[0]
7945                         myroot = x[1]
7946                         pkg_key = x[2]
7947                         portdb = self.trees[myroot]["porttree"].dbapi
7948                         bindb  = self.trees[myroot]["bintree"].dbapi
7949                         vardb = self.trees[myroot]["vartree"].dbapi
7950                         vartree = self.trees[myroot]["vartree"]
7951                         pkgsettings = self.pkgsettings[myroot]
7952
7953                         fetch=" "
7954                         indent = " " * depth
7955
7956                         if isinstance(x, Blocker):
7957                                 if x.satisfied:
7958                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7959                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7960                                 else:
7961                                         blocker_style = "PKG_BLOCKER"
7962                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7963                                 if ordered:
7964                                         counters.blocks += 1
7965                                         if x.satisfied:
7966                                                 counters.blocks_satisfied += 1
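                                     # Expand the blocker atom to a fully qualified
                                     # category/package key for display.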
7967                                 resolved = portage.key_expand(
7968                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7969                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7970                                         addl += " " + colorize(blocker_style, resolved)
7971                                 else:
7972                                         addl = "[%s %s] %s%s" % \
7973                                                 (colorize(blocker_style, "blocks"),
7974                                                 addl, indent, colorize(blocker_style, resolved))
7975                                 block_parents = self._blocker_parents.parent_nodes(x)
7976                                 block_parents = set([pnode[2] for pnode in block_parents])
7977                                 block_parents = ", ".join(block_parents)
7978                                 if resolved != x[2]:
7979                                         addl += colorize(blocker_style,
7980                                                 " (\"%s\" is blocking %s)") % \
7981                                                 (str(x.atom).lstrip("!"), block_parents)
7982                                 else:
7983                                         addl += colorize(blocker_style,
7984                                                 " (is blocking %s)") % block_parents
7985                                 if isinstance(x, Blocker) and x.satisfied:
7986                                         if columns:
7987                                                 continue
7988                                         p.append(addl)
7989                                 else:
7990                                         blockers.append(addl)
7991                         else:
7992                                 pkg_status = x[3]
7993                                 pkg_merge = ordered and pkg_status == "merge"
7994                                 if not pkg_merge and pkg_status == "merge":
7995                                         pkg_status = "nomerge"
7996                                 built = pkg_type != "ebuild"
7997                                 installed = pkg_type == "installed"
7998                                 pkg = x
7999                                 metadata = pkg.metadata
8000                                 ebuild_path = None
8001                                 repo_name = metadata["repository"]
8002                                 if pkg_type == "ebuild":
8003                                         ebuild_path = portdb.findname(pkg_key)
8004                                         if not ebuild_path: # shouldn't happen
8005                                                 raise portage.exception.PackageNotFound(pkg_key)
8006                                         repo_path_real = os.path.dirname(os.path.dirname(
8007                                                 os.path.dirname(ebuild_path)))
8008                                 else:
8009                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8010                                 pkg_use = list(pkg.use.enabled)
8011                                 try:
8012                                         restrict = flatten(use_reduce(paren_reduce(
8013                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8014                                 except portage.exception.InvalidDependString, e:
8015                                         if not pkg.installed:
8016                                                 show_invalid_depstring_notice(x,
8017                                                         pkg.metadata["RESTRICT"], str(e))
8018                                                 del e
8019                                                 return 1
8020                                         restrict = []
8021                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8022                                         "fetch" in restrict:
8023                                         fetch = red("F")
8024                                         if ordered:
8025                                                 counters.restrict_fetch += 1
8026                                         if portdb.fetch_check(pkg_key, pkg_use):
8027                                                 fetch = green("f")
8028                                                 if ordered:
8029                                                         counters.restrict_fetch_satisfied += 1
8030
8031                                 # We need to test for "--emptytree" here rather than for the "empty" param, because the "empty"
8032                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
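                                     # Status letters computed below: R = reinstall of an
                                     # installed version, U/UD = upgrade/downgrade within the
                                     # same slot, NS = new slot, N = new install.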
8033                                 myoldbest = []
8034                                 myinslotlist = None
8035                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8036                                 if vardb.cpv_exists(pkg_key):
8037                                         addl="  "+yellow("R")+fetch+"  "
8038                                         if ordered:
8039                                                 if pkg_merge:
8040                                                         counters.reinst += 1
8041                                                 elif pkg_status == "uninstall":
8042                                                         counters.uninst += 1
8043                                 # filter out old-style virtual matches
8044                                 elif installed_versions and \
8045                                         portage.cpv_getkey(installed_versions[0]) == \
8046                                         portage.cpv_getkey(pkg_key):
8047                                         myinslotlist = vardb.match(pkg.slot_atom)
8048                                         # If this is the first install of a new-style virtual, we
8049                                         # need to filter out old-style virtual matches.
8050                                         if myinslotlist and \
8051                                                 portage.cpv_getkey(myinslotlist[0]) != \
8052                                                 portage.cpv_getkey(pkg_key):
8053                                                 myinslotlist = None
8054                                         if myinslotlist:
8055                                                 myoldbest = myinslotlist[:]
8056                                                 addl = "   " + fetch
8057                                                 if not portage.dep.cpvequal(pkg_key,
8058                                                         portage.best([pkg_key] + myoldbest)):
8059                                                         # Downgrade in slot
8060                                                         addl += turquoise("U")+blue("D")
8061                                                         if ordered:
8062                                                                 counters.downgrades += 1
8063                                                 else:
8064                                                         # Update in slot
8065                                                         addl += turquoise("U") + " "
8066                                                         if ordered:
8067                                                                 counters.upgrades += 1
8068                                         else:
8069                                                 # New slot, mark it new.
8070                                                 addl = " " + green("NS") + fetch + "  "
8071                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8072                                                 if ordered:
8073                                                         counters.newslot += 1
8074
8075                                         if "--changelog" in self.myopts:
8076                                                 inst_matches = vardb.match(pkg.slot_atom)
8077                                                 if inst_matches:
8078                                                         changelogs.extend(self.calc_changelog(
8079                                                                 portdb.findname(pkg_key),
8080                                                                 inst_matches[0], pkg_key))
8081                                 else:
8082                                         addl = " " + green("N") + " " + fetch + "  "
8083                                         if ordered:
8084                                                 counters.new += 1
8085
8086                                 verboseadd = ""
8087                                 repoadd = None
8088
8089                                 if True:
8090                                         # USE flag display
8091                                         forced_flags = set()
8092                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8093                                         forced_flags.update(pkgsettings.useforce)
8094                                         forced_flags.update(pkgsettings.usemask)
8095
8096                                         cur_use = [flag for flag in pkg.use.enabled \
8097                                                 if flag in pkg.iuse.all]
8098                                         cur_iuse = sorted(pkg.iuse.all)
8099
8100                                         if myoldbest and myinslotlist:
8101                                                 previous_cpv = myoldbest[0]
8102                                         else:
8103                                                 previous_cpv = pkg.cpv
8104                                         if vardb.cpv_exists(previous_cpv):
8105                                                 old_iuse, old_use = vardb.aux_get(
8106                                                                 previous_cpv, ["IUSE", "USE"])
8107                                                 old_iuse = list(set(
8108                                                         filter_iuse_defaults(old_iuse.split())))
8109                                                 old_iuse.sort()
8110                                                 old_use = old_use.split()
8111                                                 is_new = False
8112                                         else:
8113                                                 old_iuse = []
8114                                                 old_use = []
8115                                                 is_new = True
8116
8117                                         old_use = [flag for flag in old_use if flag in old_iuse]
8118
8119                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8120                                         use_expand.sort()
8121                                         use_expand.reverse()
8122                                         use_expand_hidden = \
8123                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8124
8125                                         def map_to_use_expand(myvals, forcedFlags=False,
8126                                                 removeHidden=True):
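                                                     # Bucket each flag under its USE_EXPAND prefix, e.g. "linguas_de"
                                                     # (illustrative) becomes "de" under "LINGUAS"; unprefixed flags stay
                                                     # under "USE".  Forced flags are tracked separately when forcedFlags
                                                     # is True, and hidden expand groups are dropped unless removeHidden
                                                     # is False.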
8127                                                 ret = {}
8128                                                 forced = {}
8129                                                 for exp in use_expand:
8130                                                         ret[exp] = []
8131                                                         forced[exp] = set()
8132                                                         for val in myvals[:]:
8133                                                                 if val.startswith(exp.lower()+"_"):
8134                                                                         if val in forced_flags:
8135                                                                                 forced[exp].add(val[len(exp)+1:])
8136                                                                         ret[exp].append(val[len(exp)+1:])
8137                                                                         myvals.remove(val)
8138                                                 ret["USE"] = myvals
8139                                                 forced["USE"] = [val for val in myvals \
8140                                                         if val in forced_flags]
8141                                                 if removeHidden:
8142                                                         for exp in use_expand_hidden:
8143                                                                 ret.pop(exp, None)
8144                                                 if forcedFlags:
8145                                                         return ret, forced
8146                                                 return ret
8147
8148                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8149                                         # are the only thing that triggered reinstallation.
8150                                         reinst_flags_map = {}
8151                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8152                                         reinst_expand_map = None
8153                                         if reinstall_for_flags:
8154                                                 reinst_flags_map = map_to_use_expand(
8155                                                         list(reinstall_for_flags), removeHidden=False)
8156                                                 for k in list(reinst_flags_map):
8157                                                         if not reinst_flags_map[k]:
8158                                                                 del reinst_flags_map[k]
8159                                                 if not reinst_flags_map.get("USE"):
8160                                                         reinst_expand_map = reinst_flags_map.copy()
8161                                                         reinst_expand_map.pop("USE", None)
8162                                         if reinst_expand_map and \
8163                                                 not set(reinst_expand_map).difference(
8164                                                 use_expand_hidden):
8165                                                 use_expand_hidden = \
8166                                                         set(use_expand_hidden).difference(
8167                                                         reinst_expand_map)
8168
8169                                         cur_iuse_map, iuse_forced = \
8170                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8171                                         cur_use_map = map_to_use_expand(cur_use)
8172                                         old_iuse_map = map_to_use_expand(old_iuse)
8173                                         old_use_map = map_to_use_expand(old_use)
8174
8175                                         use_expand.sort()
8176                                         use_expand.insert(0, "USE")
8177
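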
8178                                         for key in use_expand:
8179                                                 if key in use_expand_hidden:
8180                                                         continue
8181                                                 verboseadd += create_use_string(key.upper(),
8182                                                         cur_iuse_map[key], iuse_forced[key],
8183                                                         cur_use_map[key], old_iuse_map[key],
8184                                                         old_use_map[key], is_new,
8185                                                         reinst_flags_map.get(key))
8186
8187                                 if verbosity == 3:
8188                                         # size verbose
8189                                         mysize=0
8190                                         if pkg_type == "ebuild" and pkg_merge:
8191                                                 try:
8192                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8193                                                                 useflags=pkg_use, debug=self.edebug)
8194                                                 except portage.exception.InvalidDependString, e:
8195                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8196                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8197                                                         del e
8198                                                         return 1
8199                                                 if myfilesdict is None:
8200                                                         myfilesdict="[empty/missing/bad digest]"
8201                                                 else:
8202                                                         for myfetchfile in myfilesdict:
8203                                                                 if myfetchfile not in myfetchlist:
8204                                                                         mysize+=myfilesdict[myfetchfile]
8205                                                                         myfetchlist.append(myfetchfile)
8206                                                         if ordered:
8207                                                                 counters.totalsize += mysize
8208                                                 verboseadd += format_size(mysize)
8209
8210                                         # overlay verbose
8211                                         # assign index for a previous version in the same slot
8212                                         has_previous = False
8213                                         repo_name_prev = None
8214                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8215                                                 metadata["SLOT"])
8216                                         slot_matches = vardb.match(slot_atom)
8217                                         if slot_matches:
8218                                                 has_previous = True
8219                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8220                                                         ["repository"])[0]
8221
8222                                         # now use the data to generate output
8223                                         if pkg.installed or not has_previous:
8224                                                 repoadd = repo_display.repoStr(repo_path_real)
8225                                         else:
8226                                                 repo_path_prev = None
8227                                                 if repo_name_prev:
8228                                                         repo_path_prev = portdb.getRepositoryPath(
8229                                                                 repo_name_prev)
8230                                                 if repo_path_prev == repo_path_real:
8231                                                         repoadd = repo_display.repoStr(repo_path_real)
8232                                                 else:
8233                                                         repoadd = "%s=>%s" % (
8234                                                                 repo_display.repoStr(repo_path_prev),
8235                                                                 repo_display.repoStr(repo_path_real))
8236                                         if repoadd:
8237                                                 repoadd_set.add(repoadd)
8238
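                                     # Split the cpv into [category/package, version, revision] for
                                     # display purposes; an implicit "-r0" revision is suppressed below.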
8239                                 xs = [portage.cpv_getkey(pkg_key)] + \
8240                                         list(portage.catpkgsplit(pkg_key)[2:])
8241                                 if xs[2] == "r0":
8242                                         xs[2] = ""
8243                                 else:
8244                                         xs[2] = "-" + xs[2]
8245
8246                                 mywidth = 130
8247                                 if "COLUMNWIDTH" in self.settings:
8248                                         try:
8249                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8250                                         except ValueError, e:
8251                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8252                                                 portage.writemsg(
8253                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8254                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8255                                                 del e
8256                                 oldlp = mywidth - 30
8257                                 newlp = oldlp - 30
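                                     # oldlp and newlp are the column offsets used below to line up the
                                     # new-version field and the previous-version field in --columns output.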
8258
8259                                 # Convert myoldbest from a list to a string.
8260                                 if not myoldbest:
8261                                         myoldbest = ""
8262                                 else:
8263                                         for pos, key in enumerate(myoldbest):
8264                                                 key = portage.catpkgsplit(key)[2] + \
8265                                                         "-" + portage.catpkgsplit(key)[3]
8266                                                 if key[-3:] == "-r0":
8267                                                         key = key[:-3]
8268                                                 myoldbest[pos] = key
8269                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8270
8271                                 pkg_cp = xs[0]
8272                                 root_config = self.roots[myroot]
8273                                 system_set = root_config.sets["system"]
8274                                 world_set  = root_config.sets["world"]
8275
8276                                 pkg_system = False
8277                                 pkg_world = False
8278                                 try:
8279                                         pkg_system = system_set.findAtomForPackage(pkg)
8280                                         pkg_world  = world_set.findAtomForPackage(pkg)
8281                                         if not (oneshot or pkg_world) and \
8282                                                 myroot == self.target_root and \
8283                                                 favorites_set.findAtomForPackage(pkg):
8284                                                 # Maybe it will be added to world now.
8285                                                 if create_world_atom(pkg, favorites_set, root_config):
8286                                                         pkg_world = True
8287                                 except portage.exception.InvalidDependString:
8288                                         # This is reported elsewhere if relevant.
8289                                         pass
8290
8291                                 def pkgprint(pkg_str):
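                                             # Colorize pkg_str according to merge/uninstall status and whether
                                             # the package is in the system or world set.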
8292                                         if pkg_merge:
8293                                                 if pkg_system:
8294                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8295                                                 elif pkg_world:
8296                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8297                                                 else:
8298                                                         return colorize("PKG_MERGE", pkg_str)
8299                                         elif pkg_status == "uninstall":
8300                                                 return colorize("PKG_UNINSTALL", pkg_str)
8301                                         else:
8302                                                 if pkg_system:
8303                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8304                                                 elif pkg_world:
8305                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8306                                                 else:
8307                                                         return colorize("PKG_NOMERGE", pkg_str)
8308
8309                                 try:
8310                                         properties = flatten(use_reduce(paren_reduce(
8311                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8312                                 except portage.exception.InvalidDependString, e:
8313                                         if not pkg.installed:
8314                                                 show_invalid_depstring_notice(pkg,
8315                                                         pkg.metadata["PROPERTIES"], str(e))
8316                                                 del e
8317                                                 return 1
8318                                         properties = []
8319                                 interactive = "interactive" in properties
8320                                 if interactive and pkg.operation == "merge":
8321                                         addl = colorize("WARN", "I") + addl[1:]
8322                                         if ordered:
8323                                                 counters.interactive += 1
8324
8325                                 if x[1]!="/":
8326                                         if myoldbest:
8327                                                 myoldbest +=" "
8328                                         if "--columns" in self.myopts:
8329                                                 if "--quiet" in self.myopts:
8330                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8331                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8332                                                         myprint=myprint+myoldbest
8333                                                         myprint=myprint+darkgreen("to "+x[1])
8334                                                         verboseadd = None
8335                                                 else:
8336                                                         if not pkg_merge:
8337                                                                 myprint = "[%s] %s%s" % \
8338                                                                         (pkgprint(pkg_status.ljust(13)),
8339                                                                         indent, pkgprint(pkg.cp))
8340                                                         else:
8341                                                                 myprint = "[%s %s] %s%s" % \
8342                                                                         (pkgprint(pkg.type_name), addl,
8343                                                                         indent, pkgprint(pkg.cp))
8344                                                         if (newlp-nc_len(myprint)) > 0:
8345                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8346                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8347                                                         if (oldlp-nc_len(myprint)) > 0:
8348                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8349                                                         myprint=myprint+myoldbest
8350                                                         myprint += darkgreen("to " + pkg.root)
8351                                         else:
8352                                                 if not pkg_merge:
8353                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8354                                                 else:
8355                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8356                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8357                                                         myoldbest + darkgreen("to " + myroot)
8358                                 else:
8359                                         if "--columns" in self.myopts:
8360                                                 if "--quiet" in self.myopts:
8361                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8362                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8363                                                         myprint=myprint+myoldbest
8364                                                         verboseadd = None
8365                                                 else:
8366                                                         if not pkg_merge:
8367                                                                 myprint = "[%s] %s%s" % \
8368                                                                         (pkgprint(pkg_status.ljust(13)),
8369                                                                         indent, pkgprint(pkg.cp))
8370                                                         else:
8371                                                                 myprint = "[%s %s] %s%s" % \
8372                                                                         (pkgprint(pkg.type_name), addl,
8373                                                                         indent, pkgprint(pkg.cp))
8374                                                         if (newlp-nc_len(myprint)) > 0:
8375                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8376                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8377                                                         if (oldlp-nc_len(myprint)) > 0:
8378                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8379                                                         myprint += myoldbest
8380                                         else:
8381                                                 if not pkg_merge:
8382                                                         myprint = "[%s] %s%s %s" % \
8383                                                                 (pkgprint(pkg_status.ljust(13)),
8384                                                                 indent, pkgprint(pkg.cpv),
8385                                                                 myoldbest)
8386                                                 else:
8387                                                         myprint = "[%s %s] %s%s %s" % \
8388                                                                 (pkgprint(pkg_type), addl, indent,
8389                                                                 pkgprint(pkg.cpv), myoldbest)
8390
8391                                 if columns and pkg.operation == "uninstall":
8392                                         continue
8393                                 p.append((myprint, verboseadd, repoadd))
8394
8395                                 if "--tree" not in self.myopts and \
8396                                         "--quiet" not in self.myopts and \
8397                                         not self._opts_no_restart.intersection(self.myopts) and \
8398                                         pkg.root == self._running_root.root and \
8399                                         portage.match_from_list(
8400                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8401                                         not vardb.cpv_exists(pkg.cpv):
8403                                                 if mylist_index < len(mylist) - 1:
8404                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8405                                                         p.append(colorize("WARN", "    then resume the merge."))
8406
8407                 out = sys.stdout
8408                 show_repos = repoadd_set and repoadd_set != set(["0"])
8409
8410                 for x in p:
8411                         if isinstance(x, basestring):
8412                                 out.write("%s\n" % (x,))
8413                                 continue
8414
8415                         myprint, verboseadd, repoadd = x
8416
8417                         if verboseadd:
8418                                 myprint += " " + verboseadd
8419
8420                         if show_repos and repoadd:
8421                                 myprint += " " + teal("[%s]" % repoadd)
8422
8423                         out.write("%s\n" % (myprint,))
8424
8425                 for x in blockers:
8426                         print x
8427
8428                 if verbosity == 3:
8429                         print
8430                         print counters
8431                         if show_repos:
8432                                 sys.stdout.write(str(repo_display))
8433
8434                 if "--changelog" in self.myopts:
8435                         print
8436                         for revision,text in changelogs:
8437                                 print bold('*'+revision)
8438                                 sys.stdout.write(text)
8439
8440                 sys.stdout.flush()
8441                 return os.EX_OK
8442
8443         def display_problems(self):
8444                 """
8445                 Display problems with the dependency graph such as slot collisions.
8446                 This is called internally by display() to show the problems _after_
8447                 the merge list where it is most likely to be seen, but if display()
8448                 is not going to be called then this method should be called explicitly
8449                 to ensure that the user is notified of problems with the graph.
8450
8451                 All output goes to stderr, except for unsatisfied dependencies which
8452                 go to stdout for parsing by programs such as autounmask.
8453                 """
8454
8455                 # Note that show_masked_packages() sends its output to
8456                 # stdout, and some programs such as autounmask parse the
8457                 # output in cases when emerge bails out. However, when
8458                 # show_masked_packages() is called for installed packages
8459                 # here, the message is a warning that is more appropriate
8460                 # to send to stderr, so temporarily redirect stdout to
8461                 # stderr. TODO: Fix output code so there's a cleaner way
8462                 # to redirect everything to stderr.
8463                 sys.stdout.flush()
8464                 sys.stderr.flush()
8465                 stdout = sys.stdout
8466                 try:
8467                         sys.stdout = sys.stderr
8468                         self._display_problems()
8469                 finally:
8470                         sys.stdout = stdout
8471                         sys.stdout.flush()
8472                         sys.stderr.flush()
8473
8474                 # This goes to stdout for parsing by programs like autounmask.
8475                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8476                         self._show_unsatisfied_dep(*pargs, **kwargs)
8477
8478         def _display_problems(self):
8479                 if self._circular_deps_for_display is not None:
8480                         self._show_circular_deps(
8481                                 self._circular_deps_for_display)
8482
8483                 # The user is only notified of a slot conflict if
8484                 # there are no unresolvable blocker conflicts.
8485                 if self._unsatisfied_blockers_for_display is not None:
8486                         self._show_unsatisfied_blockers(
8487                                 self._unsatisfied_blockers_for_display)
8488                 else:
8489                         self._show_slot_collision_notice()
8490
8491                 # TODO: Add generic support for "set problem" handlers so that
8492                 # the below warnings aren't special cases for world only.
8493
8494                 if self._missing_args:
8495                         world_problems = False
8496                         if "world" in self._sets:
8497                                 # Filter out indirect members of world (from nested sets)
8498                                 # since only direct members of world are desired here.
8499                                 world_set = self.roots[self.target_root].sets["world"]
8500                                 for arg, atom in self._missing_args:
8501                                         if arg.name == "world" and atom in world_set:
8502                                                 world_problems = True
8503                                                 break
8504
8505                         if world_problems:
8506                                 sys.stderr.write("\n!!! Problems have been " + \
8507                                         "detected with your world file\n")
8508                                 sys.stderr.write("!!! Please run " + \
8509                                         green("emaint --check world")+"\n\n")
8510
8511                 if self._missing_args:
8512                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8513                                 " Ebuilds for the following packages are either all\n")
8514                         sys.stderr.write(colorize("BAD", "!!!") + \
8515                                 " masked or don't exist:\n")
8516                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8517                                 self._missing_args) + "\n")
8518
8519                 if self._pprovided_args:
8520                         arg_refs = {}
8521                         for arg, atom in self._pprovided_args:
8522                                 if isinstance(arg, SetArg):
8523                                         parent = arg.name
8524                                         arg_atom = (atom, atom)
8525                                 else:
8526                                         parent = "args"
8527                                         arg_atom = (arg.arg, atom)
8528                                 refs = arg_refs.setdefault(arg_atom, [])
8529                                 if parent not in refs:
8530                                         refs.append(parent)
8531                         msg = []
8532                         msg.append(bad("\nWARNING: "))
8533                         if len(self._pprovided_args) > 1:
8534                                 msg.append("Requested packages will not be " + \
8535                                         "merged because they are listed in\n")
8536                         else:
8537                                 msg.append("A requested package will not be " + \
8538                                         "merged because it is listed in\n")
8539                         msg.append("package.provided:\n\n")
8540                         problems_sets = set()
8541                         for (arg, atom), refs in arg_refs.iteritems():
8542                                 ref_string = ""
8543                                 if refs:
8544                                         problems_sets.update(refs)
8545                                         refs.sort()
8546                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8547                                         ref_string = " pulled in by " + ref_string
8548                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8549                         msg.append("\n")
8550                         if "world" in problems_sets:
8551                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8552                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8553                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8554                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8555                                 msg.append("The best course of action depends on the reason that an offending\n")
8556                                 msg.append("package.provided entry exists.\n\n")
8557                         sys.stderr.write("".join(msg))
8558
8559                 masked_packages = []
8560                 for pkg in self._masked_installed:
8561                         root_config = pkg.root_config
8562                         pkgsettings = self.pkgsettings[pkg.root]
8563                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8564                         masked_packages.append((root_config, pkgsettings,
8565                                 pkg.cpv, pkg.metadata, mreasons))
8566                 if masked_packages:
8567                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8568                                 " The following installed packages are masked:\n")
8569                         show_masked_packages(masked_packages)
8570                         show_mask_docs()
8571                         print
8572
8573         def calc_changelog(self,ebuildpath,current,next):
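                     """Return the ChangeLog entries between the installed version
                     (current) and the version about to be merged (next), most recent
                     first; returns an empty list if the ChangeLog cannot be read or
                     the current version is not found in it."""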
8574                 if ebuildpath is None or not os.path.exists(ebuildpath):
8575                         return []
8576                 current = '-'.join(portage.catpkgsplit(current)[1:])
8577                 if current.endswith('-r0'):
8578                         current = current[:-3]
8579                 next = '-'.join(portage.catpkgsplit(next)[1:])
8580                 if next.endswith('-r0'):
8581                         next = next[:-3]
8582                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8583                 try:
8584                         changelog = open(changelogpath).read()
8585                 except SystemExit, e:
8586                         raise # re-raise so SystemExit isn't swallowed by the bare except below
8587                 except:
8588                         return []
8589                 divisions = self.find_changelog_tags(changelog)
8590                 #print 'XX from',current,'to',next
8591                 #for div,text in divisions: print 'XX',div
8592                 # skip entries for all revisions above the one we are about to emerge
8593                 for i in range(len(divisions)):
8594                         if divisions[i][0]==next:
8595                                 divisions = divisions[i:]
8596                                 break
8597                 # find out how many entries we are going to display
8598                 for i in range(len(divisions)):
8599                         if divisions[i][0]==current:
8600                                 divisions = divisions[:i]
8601                                 break
8602                 else:
8603                         # couldn't find the current revision in the list; display nothing
8604                         return []
8605                 return divisions
8606
8607         def find_changelog_tags(self,changelog):
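                     """Split a ChangeLog into a list of (release, text) tuples, one per
                     "*<version>" header, stripping any trailing ".ebuild" or "-r0"
                     suffix from the release names."""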
8608                 divs = []
8609                 release = None
8610                 while 1:
8611                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8612                         if match is None:
8613                                 if release is not None:
8614                                         divs.append((release,changelog))
8615                                 return divs
8616                         if release is not None:
8617                                 divs.append((release,changelog[:match.start()]))
8618                         changelog = changelog[match.end():]
8619                         release = match.group(1)
8620                         if release.endswith('.ebuild'):
8621                                 release = release[:-7]
8622                         if release.endswith('-r0'):
8623                                 release = release[:-3]
8624
8625         def saveNomergeFavorites(self):
8626                 """Find atoms in favorites that are not in the mergelist and add them
8627                 to the world file if necessary."""
8628                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8629                         "--oneshot", "--onlydeps", "--pretend"):
8630                         if x in self.myopts:
8631                                 return
8632                 root_config = self.roots[self.target_root]
8633                 world_set = root_config.sets["world"]
8634
8635                 world_locked = False
8636                 if hasattr(world_set, "lock"):
8637                         world_set.lock()
8638                         world_locked = True
8639
8640                 if hasattr(world_set, "load"):
8641                         world_set.load() # maybe it's changed on disk
8642
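                     # Only packages that ended up with a "nomerge" status (already
                     # installed, pulled in directly by an argument or set) are
                     # candidates for recording in the world file here.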
8643                 args_set = self._sets["args"]
8644                 portdb = self.trees[self.target_root]["porttree"].dbapi
8645                 added_favorites = set()
8646                 for x in self._set_nodes:
8647                         pkg_type, root, pkg_key, pkg_status = x
8648                         if pkg_status != "nomerge":
8649                                 continue
8650
8651                         try:
8652                                 myfavkey = create_world_atom(x, args_set, root_config)
8653                                 if myfavkey:
8654                                         if myfavkey in added_favorites:
8655                                                 continue
8656                                         added_favorites.add(myfavkey)
8657                         except portage.exception.InvalidDependString, e:
8658                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8659                                         (pkg_key, str(e)), noiselevel=-1)
8660                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8661                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8662                                 del e
8663                 all_added = []
8664                 for k in self._sets:
8665                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8666                                 continue
8667                         s = SETPREFIX + k
8668                         if s in world_set:
8669                                 continue
8670                         all_added.append(SETPREFIX + k)
8671                 all_added.extend(added_favorites)
8672                 all_added.sort()
8673                 for a in all_added:
8674                         print ">>> Recording %s in \"world\" favorites file..." % \
8675                                 colorize("INFORM", str(a))
8676                 if all_added:
8677                         world_set.update(all_added)
8678
8679                 if world_locked:
8680                         world_set.unlock()
8681
8682         def loadResumeCommand(self, resume_data, skip_masked=False):
8683                 """
8684                 Add a resume command to the graph and validate it in the process.  This
8685                 will raise a PackageNotFound exception if a package is not available.
8686                 """
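                     # A well-formed resume_data dict looks roughly like (illustrative):
                     #   {"mergelist": [["ebuild", "/", "sys-apps/foo-1.0", "merge"], ...],
                     #    "favorites": ["sys-apps/foo", ...]}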
8687
8688                 if not isinstance(resume_data, dict):
8689                         return False
8690
8691                 mergelist = resume_data.get("mergelist")
8692                 if not isinstance(mergelist, list):
8693                         mergelist = []
8694
8695                 fakedb = self.mydbapi
8696                 trees = self.trees
8697                 serialized_tasks = []
8698                 masked_tasks = []
8699                 for x in mergelist:
8700                         if not (isinstance(x, list) and len(x) == 4):
8701                                 continue
8702                         pkg_type, myroot, pkg_key, action = x
8703                         if pkg_type not in self.pkg_tree_map:
8704                                 continue
8705                         if action != "merge":
8706                                 continue
8707                         tree_type = self.pkg_tree_map[pkg_type]
8708                         mydb = trees[myroot][tree_type].dbapi
8709                         db_keys = list(self._trees_orig[myroot][
8710                                 tree_type].dbapi._aux_cache_keys)
8711                         try:
8712                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8713                         except KeyError:
8714                                 # It does not exist or it is corrupt.
8715                                 if action == "uninstall":
8716                                         continue
8717                                 raise portage.exception.PackageNotFound(pkg_key)
8718                         installed = action == "uninstall"
8719                         built = pkg_type != "ebuild"
8720                         root_config = self.roots[myroot]
8721                         pkg = Package(built=built, cpv=pkg_key,
8722                                 installed=installed, metadata=metadata,
8723                                 operation=action, root_config=root_config,
8724                                 type_name=pkg_type)
8725                         if pkg_type == "ebuild":
8726                                 pkgsettings = self.pkgsettings[myroot]
8727                                 pkgsettings.setcpv(pkg)
8728                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8729                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8730                         self._pkg_cache[pkg] = pkg
8731
8732                         root_config = self.roots[pkg.root]
8733                         if "merge" == pkg.operation and \
8734                                 not visible(root_config.settings, pkg):
8735                                 if skip_masked:
8736                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8737                                 else:
8738                                         self._unsatisfied_deps_for_display.append(
8739                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8740
8741                         fakedb[myroot].cpv_inject(pkg)
8742                         serialized_tasks.append(pkg)
8743                         self.spinner.update()
8744
8745                 if self._unsatisfied_deps_for_display:
8746                         return False
8747
8748                 if not serialized_tasks or "--nodeps" in self.myopts:
8749                         self._serialized_tasks_cache = serialized_tasks
8750                         self._scheduler_graph = self.digraph
8751                 else:
8752                         self._select_package = self._select_pkg_from_graph
8753                         self.myparams.add("selective")
8754                         # Always traverse deep dependencies in order to account for
8755                         # potentially unsatisfied dependencies of installed packages.
8756                         # This is necessary for correct --keep-going or --resume operation
8757                         # in case a package from a group of circularly dependent packages
8758                         # fails. In this case, a package which has recently been installed
8759                         # may have an unsatisfied circular dependency (pulled in by
8760                         # PDEPEND, for example). So, even though a package is already
8761                         # installed, it may not have all of its dependencies satisfied, so
8762                         # it may not be usable. If such a package is in the subgraph of
8763                         # deep dependencies of a scheduled build, that build needs to
8764                         # be cancelled. In order for this type of situation to be
8765                         # recognized, deep traversal of dependencies is required.
8766                         self.myparams.add("deep")
8767
8768                         favorites = resume_data.get("favorites")
8769                         args_set = self._sets["args"]
8770                         if isinstance(favorites, list):
8771                                 args = self._load_favorites(favorites)
8772                         else:
8773                                 args = []
8774
8775                         for task in serialized_tasks:
8776                                 if isinstance(task, Package) and \
8777                                         task.operation == "merge":
8778                                         if not self._add_pkg(task, None):
8779                                                 return False
8780
8781                         # Packages for argument atoms need to be explicitly
8782                         # added via _add_pkg() so that they are included in the
8783                         # digraph (needed at least for --tree display).
8784                         for arg in args:
8785                                 for atom in arg.set:
8786                                         pkg, existing_node = self._select_package(
8787                                                 arg.root_config.root, atom)
8788                                         if existing_node is None and \
8789                                                 pkg is not None:
8790                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8791                                                         root=pkg.root, parent=arg)):
8792                                                         return False
8793
8794                         # Allow unsatisfied deps here to avoid showing a masking
8795                         # message for an unsatisfied dep that isn't necessarily
8796                         # masked.
8797                         if not self._create_graph(allow_unsatisfied=True):
8798                                 return False
8799
8800                         unsatisfied_deps = []
8801                         for dep in self._unsatisfied_deps:
8802                                 if not isinstance(dep.parent, Package):
8803                                         continue
8804                                 if dep.parent.operation == "merge":
8805                                         unsatisfied_deps.append(dep)
8806                                         continue
8807
8808                                 # For unsatisfied deps of installed packages, only account for
8809                                 # them if they are in the subgraph of dependencies of a package
8810                                 # which is scheduled to be installed.
8811                                 unsatisfied_install = False
8812                                 traversed = set()
8813                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8814                                 while dep_stack:
8815                                         node = dep_stack.pop()
8816                                         if not isinstance(node, Package):
8817                                                 continue
8818                                         if node.operation == "merge":
8819                                                 unsatisfied_install = True
8820                                                 break
8821                                         if node in traversed:
8822                                                 continue
8823                                         traversed.add(node)
8824                                         dep_stack.extend(self.digraph.parent_nodes(node))
8825
8826                                 if unsatisfied_install:
8827                                         unsatisfied_deps.append(dep)
8828
8829                         if masked_tasks or unsatisfied_deps:
8830                                 # This probably means that a required package
8831                                 # was dropped via --skipfirst. It makes the
8832                                 # resume list invalid, so convert it to a
8833                                 # UnsatisfiedResumeDep exception.
8834                                 raise self.UnsatisfiedResumeDep(self,
8835                                         masked_tasks + unsatisfied_deps)
8836                         self._serialized_tasks_cache = None
8837                         try:
8838                                 self.altlist()
8839                         except self._unknown_internal_error:
8840                                 return False
8841
8842                 return True
8843
8844         def _load_favorites(self, favorites):
8845                 """
8846                 Use a list of favorites to resume state from a
8847                 previous select_files() call. This creates similar
8848                 DependencyArg instances to those that would have
8849                 been created by the original select_files() call.
8850                 This allows Package instances to be matched with
8851                 DependencyArg instances during graph creation.
8852                 """
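                     # For example (illustrative), a favorites entry like "app-editors/vim"
                     # becomes an AtomArg, while "@some-set" becomes a SetArg backed by the
                     # recursively expanded set, provided that set exists and is not
                     # already loaded.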
8853                 root_config = self.roots[self.target_root]
8854                 getSetAtoms = root_config.setconfig.getSetAtoms
8855                 sets = root_config.sets
8856                 args = []
8857                 for x in favorites:
8858                         if not isinstance(x, basestring):
8859                                 continue
8860                         if x in ("system", "world"):
8861                                 x = SETPREFIX + x
8862                         if x.startswith(SETPREFIX):
8863                                 s = x[len(SETPREFIX):]
8864                                 if s not in sets:
8865                                         continue
8866                                 if s in self._sets:
8867                                         continue
8868                                 # Recursively expand sets so that containment tests in
8869                                 # self._get_parent_sets() properly match atoms in nested
8870                                 # sets (like if world contains system).
8871                                 expanded_set = InternalPackageSet(
8872                                         initial_atoms=getSetAtoms(s))
8873                                 self._sets[s] = expanded_set
8874                                 args.append(SetArg(arg=x, set=expanded_set,
8875                                         root_config=root_config))
8876                         else:
8877                                 if not portage.isvalidatom(x):
8878                                         continue
8879                                 args.append(AtomArg(arg=x, atom=x,
8880                                         root_config=root_config))
8881
8882                 self._set_args(args)
8883                 return args
8884
8885         class UnsatisfiedResumeDep(portage.exception.PortageException):
8886                 """
8887                 A dependency of a resume list is not installed. This
8888                 can occur when a required package is dropped from the
8889                 merge list via --skipfirst.
8890                 """
8891                 def __init__(self, depgraph, value):
8892                         portage.exception.PortageException.__init__(self, value)
8893                         self.depgraph = depgraph
8894
8895         class _internal_exception(portage.exception.PortageException):
8896                 def __init__(self, value=""):
8897                         portage.exception.PortageException.__init__(self, value)
8898
8899         class _unknown_internal_error(_internal_exception):
8900                 """
8901                 Used by the depgraph internally to terminate graph creation.
8902                 The specific reason for the failure should have been dumped
8903                 to stderr; unfortunately, the exact reason for the failure
8904                 may not be known.
8905                 """
8906
8907         class _serialize_tasks_retry(_internal_exception):
8908                 """
8909                 This is raised by the _serialize_tasks() method when it needs to
8910                 be called again for some reason. The only case that it's currently
8911                 used for is when neglected dependencies need to be added to the
8912                 graph in order to avoid making a potentially unsafe decision.
8913                 """
8914
8915         class _dep_check_composite_db(portage.dbapi):
8916                 """
8917                 A dbapi-like interface that is optimized for use in dep_check() calls.
8918                 This is built on top of the existing depgraph package selection logic.
8919                 Some packages that have been added to the graph may be masked from this
8920                 view in order to influence the atom preference selection that occurs
8921                 via dep_check().
8922                 """
8923                 def __init__(self, depgraph, root):
8924                         portage.dbapi.__init__(self)
8925                         self._depgraph = depgraph
8926                         self._root = root
8927                         self._match_cache = {}
8928                         self._cpv_pkg_map = {}
8929
8930                 def _clear_cache(self):
8931                         self._match_cache.clear()
8932                         self._cpv_pkg_map.clear()
8933
8934                 def match(self, atom):
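                             # Return the visible cpv matches for atom, preferring whatever the
                             # depgraph itself would select; results are cached per atom string.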
8935                         ret = self._match_cache.get(atom)
8936                         if ret is not None:
8937                                 return ret[:]
8938                         orig_atom = atom
8939                         if "/" not in atom:
8940                                 atom = self._dep_expand(atom)
8941                         pkg, existing = self._depgraph._select_package(self._root, atom)
8942                         if not pkg:
8943                                 ret = []
8944                         else:
8945                                 # Return the highest available from select_package() as well as
8946                                 # any matching slots in the graph db.
8947                                 slots = set()
8948                                 slots.add(pkg.metadata["SLOT"])
8949                                 atom_cp = portage.dep_getkey(atom)
8950                                 if pkg.cp.startswith("virtual/"):
8951                                         # For new-style virtual lookahead that occurs inside
8952                                         # dep_check(), examine all slots. This is needed
8953                                         # so that newer slots will not unnecessarily be pulled in
8954                                         # when a satisfying lower slot is already installed. For
8955                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8956                                         # there's no need to pull in a newer slot to satisfy a
8957                                         # virtual/jdk dependency.
8958                                         for db, pkg_type, built, installed, db_keys in \
8959                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8960                                                 for cpv in db.match(atom):
8961                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8962                                                                 continue
8963                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8964                                 ret = []
8965                                 if self._visible(pkg):
8966                                         self._cpv_pkg_map[pkg.cpv] = pkg
8967                                         ret.append(pkg.cpv)
8968                                 slots.remove(pkg.metadata["SLOT"])
8969                                 while slots:
8970                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8971                                         pkg, existing = self._depgraph._select_package(
8972                                                 self._root, slot_atom)
8973                                         if not pkg:
8974                                                 continue
8975                                         if not self._visible(pkg):
8976                                                 continue
8977                                         self._cpv_pkg_map[pkg.cpv] = pkg
8978                                         ret.append(pkg.cpv)
8979                                 if ret:
8980                                         self._cpv_sort_ascending(ret)
8981                         self._match_cache[orig_atom] = ret
8982                         return ret[:]
8983
8984                 def _visible(self, pkg):
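                             # Reject candidates that dep_check() should not choose: installed
                             # packages that were explicitly requested (when "selective" mode is
                             # off) or are no longer visible, and any package whose slot would
                             # conflict with the version already selected for the graph.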
8985                         if pkg.installed and "selective" not in self._depgraph.myparams:
8986                                 try:
8987                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8988                                 except (StopIteration, portage.exception.InvalidDependString):
8989                                         arg = None
8990                                 if arg:
8991                                         return False
8992                         if pkg.installed:
8993                                 try:
8994                                         if not visible(
8995                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8996                                                 return False
8997                                 except portage.exception.InvalidDependString:
8998                                         pass
8999                         in_graph = self._depgraph._slot_pkg_map[
9000                                 self._root].get(pkg.slot_atom)
9001                         if in_graph is None:
9002                                 # Mask choices for packages which are not the highest visible
9003                                 # version within their slot (since they usually trigger slot
9004                                 # conflicts).
9005                                 highest_visible, in_graph = self._depgraph._select_package(
9006                                         self._root, pkg.slot_atom)
9007                                 if pkg != highest_visible:
9008                                         return False
9009                         elif in_graph != pkg:
9010                                 # Mask choices for packages that would trigger a slot
9011                                 # conflict with a previously selected package.
9012                                 return False
9013                         return True
9014
9015                 def _dep_expand(self, atom):
9016                         """
9017                         This is only needed for old installed packages that may
9018                         contain atoms that are not fully qualified with a specific
9019                         category. Emulate the cpv_expand() function that's used by
9020                         dbapi.match() in cases like this. If there are multiple
9021                         matches, it's often due to a new-style virtual that has
9022                         been added, so try to filter those out to avoid raising
9023                         a ValueError.
9024                         """
9025                         root_config = self._depgraph.roots[self._root]
9026                         orig_atom = atom
9027                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9028                         if len(expanded_atoms) > 1:
9029                                 non_virtual_atoms = []
9030                                 for x in expanded_atoms:
9031                                         if not portage.dep_getkey(x).startswith("virtual/"):
9032                                                 non_virtual_atoms.append(x)
9033                                 if len(non_virtual_atoms) == 1:
9034                                         expanded_atoms = non_virtual_atoms
9035                         if len(expanded_atoms) > 1:
9036                                 # compatible with portage.cpv_expand()
9037                                 raise portage.exception.AmbiguousPackageName(
9038                                         [portage.dep_getkey(x) for x in expanded_atoms])
9039                         if expanded_atoms:
9040                                 atom = expanded_atoms[0]
9041                         else:
9042                                 null_atom = insert_category_into_atom(atom, "null")
9043                                 null_cp = portage.dep_getkey(null_atom)
9044                                 cat, atom_pn = portage.catsplit(null_cp)
9045                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9046                                 if virts_p:
9047                                         # Allow the resolver to choose which virtual.
9048                                         atom = insert_category_into_atom(atom, "virtual")
9049                                 else:
9050                                         atom = insert_category_into_atom(atom, "null")
9051                         return atom
9052
9053                 def aux_get(self, cpv, wants):
9054                         metadata = self._cpv_pkg_map[cpv].metadata
9055                         return [metadata.get(x, "") for x in wants]
9056
9057 class RepoDisplay(object):
9058         def __init__(self, roots):
9059                 self._shown_repos = {}
9060                 self._unknown_repo = False
9061                 repo_paths = set()
9062                 for root_config in roots.itervalues():
9063                         portdir = root_config.settings.get("PORTDIR")
9064                         if portdir:
9065                                 repo_paths.add(portdir)
9066                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9067                         if overlays:
9068                                 repo_paths.update(overlays.split())
9069                 repo_paths = list(repo_paths)
9070                 self._repo_paths = repo_paths
9071                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9072                         for repo_path in repo_paths ]
9073
9074                 # Pre-allocate an index for PORTDIR so that it is always index 0.
9075                 for root_config in roots.itervalues():
9076                         portdb = root_config.trees["porttree"].dbapi
9077                         portdir = portdb.porttree_root
9078                         if portdir:
9079                                 self.repoStr(portdir)
9080
9081         def repoStr(self, repo_path_real):
9082                 real_index = -1
9083                 if repo_path_real:
9084                         real_index = self._repo_paths_real.index(repo_path_real)
9085                 if real_index == -1:
9086                         s = "?"
9087                         self._unknown_repo = True
9088                 else:
9089                         shown_repos = self._shown_repos
9090                         repo_paths = self._repo_paths
9091                         repo_path = repo_paths[real_index]
9092                         index = shown_repos.get(repo_path)
9093                         if index is None:
9094                                 index = len(shown_repos)
9095                                 shown_repos[repo_path] = index
9096                         s = str(index)
9097                 return s
9098
9099         def __str__(self):
9100                 output = []
9101                 shown_repos = self._shown_repos
9102                 unknown_repo = self._unknown_repo
9103                 if shown_repos or self._unknown_repo:
9104                         output.append("Portage tree and overlays:\n")
9105                 show_repo_paths = list(shown_repos)
9106                 for repo_path, repo_index in shown_repos.iteritems():
9107                         show_repo_paths[repo_index] = repo_path
9108                 if show_repo_paths:
9109                         for index, repo_path in enumerate(show_repo_paths):
9110                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9111                 if unknown_repo:
9112                         output.append(" "+teal("[?]") + \
9113                                 " indicates that the source repository could not be determined\n")
9114                 return "".join(output)
9115
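# Illustrative sketch (not part of the runtime logic): RepoDisplay assigns
# display indices in the order repositories are first shown, with PORTDIR
# pre-allocated to index 0 in __init__(). "portdir_path" and "overlay_path"
# are hypothetical realpaths that appear in the configuration:
#
#   repo_display = RepoDisplay(roots)
#   repo_display.repoStr(portdir_path)   # -> "0"  (PORTDIR)
#   repo_display.repoStr(overlay_path)   # -> "1"  (first overlay shown)
#   repo_display.repoStr(None)           # -> "?"  (source repository unknown)
#   print repo_display                   # prints the numbered legend built above
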
9116 class PackageCounters(object):
9117
9118         def __init__(self):
9119                 self.upgrades   = 0
9120                 self.downgrades = 0
9121                 self.new        = 0
9122                 self.newslot    = 0
9123                 self.reinst     = 0
9124                 self.uninst     = 0
9125                 self.blocks     = 0
9126                 self.blocks_satisfied         = 0
9127                 self.totalsize  = 0
9128                 self.restrict_fetch           = 0
9129                 self.restrict_fetch_satisfied = 0
9130                 self.interactive              = 0
9131
9132         def __str__(self):
9133                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9134                 myoutput = []
9135                 details = []
9136                 myoutput.append("Total: %s package" % total_installs)
9137                 if total_installs != 1:
9138                         myoutput.append("s")
9139                 if total_installs != 0:
9140                         myoutput.append(" (")
9141                 if self.upgrades > 0:
9142                         details.append("%s upgrade" % self.upgrades)
9143                         if self.upgrades > 1:
9144                                 details[-1] += "s"
9145                 if self.downgrades > 0:
9146                         details.append("%s downgrade" % self.downgrades)
9147                         if self.downgrades > 1:
9148                                 details[-1] += "s"
9149                 if self.new > 0:
9150                         details.append("%s new" % self.new)
9151                 if self.newslot > 0:
9152                         details.append("%s in new slot" % self.newslot)
9153                         if self.newslot > 1:
9154                                 details[-1] += "s"
9155                 if self.reinst > 0:
9156                         details.append("%s reinstall" % self.reinst)
9157                         if self.reinst > 1:
9158                                 details[-1] += "s"
9159                 if self.uninst > 0:
9160                         details.append("%s uninstall" % self.uninst)
9161                         if self.uninst > 1:
9162                                 details[-1] += "s"
9163                 if self.interactive > 0:
9164                         details.append("%s %s" % (self.interactive,
9165                                 colorize("WARN", "interactive")))
9166                 myoutput.append(", ".join(details))
9167                 if total_installs != 0:
9168                         myoutput.append(")")
9169                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9170                 if self.restrict_fetch:
9171                         myoutput.append("\nFetch Restriction: %s package" % \
9172                                 self.restrict_fetch)
9173                         if self.restrict_fetch > 1:
9174                                 myoutput.append("s")
9175                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9176                         myoutput.append(bad(" (%s unsatisfied)") % \
9177                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9178                 if self.blocks > 0:
9179                         myoutput.append("\nConflict: %s block" % \
9180                                 self.blocks)
9181                         if self.blocks > 1:
9182                                 myoutput.append("s")
9183                         if self.blocks_satisfied < self.blocks:
9184                                 myoutput.append(bad(" (%s unsatisfied)") % \
9185                                         (self.blocks - self.blocks_satisfied))
9186                 return "".join(myoutput)
9187
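# Illustrative example (not part of the runtime logic): with two upgrades and
# one new install counted and everything else zero, PackageCounters renders a
# summary along the lines of
#
#   Total: 3 packages (2 upgrades, 1 new), Size of downloads: <format_size(totalsize)>
#
# where the download size is rendered by format_size().
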
9188 class PollSelectAdapter(PollConstants):
9189
9190         """
9191         Use select to emulate a poll object, for
9192         systems that don't support poll().
9193         """
9194
9195         def __init__(self):
9196                 self._registered = {}
9197                 self._select_args = [[], [], []]
9198
9199         def register(self, fd, *args):
9200                 """
9201                 Only POLLIN is currently supported!
9202                 """
9203                 if len(args) > 1:
9204                         raise TypeError(
9205                                 "register expected at most 2 arguments, got " + \
9206                                 repr(1 + len(args)))
9207
9208                 eventmask = PollConstants.POLLIN | \
9209                         PollConstants.POLLPRI | PollConstants.POLLOUT
9210                 if args:
9211                         eventmask = args[0]
9212
9213                 self._registered[fd] = eventmask
9214                 self._select_args = None
9215
9216         def unregister(self, fd):
9217                 self._select_args = None
9218                 del self._registered[fd]
9219
9220         def poll(self, *args):
9221                 if len(args) > 1:
9222                         raise TypeError(
9223                                 "poll expected at most 2 arguments, got " + \
9224                                 repr(1 + len(args)))
9225
9226                 timeout = None
9227                 if args:
9228                         timeout = args[0]
9229
9230                 select_args = self._select_args
9231                 if select_args is None:
9232                         select_args = [self._registered.keys(), [], []]
9233
9234                 if timeout is not None:
9235                         select_args = select_args[:]
9236                         # Translate poll() timeout args to select() timeout args:
9237                         #
9238                         #          | units        | value(s) for indefinite block
9239                         # ---------|--------------|------------------------------
9240                         #   poll   | milliseconds | omitted, negative, or None
9241                         # ---------|--------------|------------------------------
9242                         #   select | seconds      | omitted
9243                         # ---------|--------------|------------------------------
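                        # For example, a poll() timeout of 500 (milliseconds)
                        # is passed to select() as a timeout of 0.5 (seconds).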
9244
9245                         if timeout is not None and timeout < 0:
9246                                 timeout = None
9247                         if timeout is not None:
9248                                 select_args.append(timeout / 1000.0)
9249
9250                 select_events = select.select(*select_args)
9251                 poll_events = []
9252                 for fd in select_events[0]:
9253                         poll_events.append((fd, PollConstants.POLLIN))
9254                 return poll_events
9255
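# Illustrative sketch (not part of the runtime logic): the PollSelectAdapter
# above can be used interchangeably with a real poll object for the
# POLLIN-only case:
#
#   import os
#   pr, pw = os.pipe()
#   poll_obj = PollSelectAdapter()
#   poll_obj.register(pr, PollConstants.POLLIN)
#   os.write(pw, "x")
#   events = poll_obj.poll(1000)   # timeout in milliseconds, as with poll()
#   # events == [(pr, PollConstants.POLLIN)]
#   poll_obj.unregister(pr)
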
9256 class SequentialTaskQueue(SlotObject):
9257
9258         __slots__ = ("max_jobs", "running_tasks") + \
9259                 ("_dirty", "_scheduling", "_task_queue")
9260
9261         def __init__(self, **kwargs):
9262                 SlotObject.__init__(self, **kwargs)
9263                 self._task_queue = deque()
9264                 self.running_tasks = set()
9265                 if self.max_jobs is None:
9266                         self.max_jobs = 1
9267                 self._dirty = True
9268
9269         def add(self, task):
9270                 self._task_queue.append(task)
9271                 self._dirty = True
9272
9273         def addFront(self, task):
9274                 self._task_queue.appendleft(task)
9275                 self._dirty = True
9276
9277         def schedule(self):
9278
9279                 if not self._dirty:
9280                         return False
9281
9282                 if not self:
9283                         return False
9284
9285                 if self._scheduling:
9286                         # Ignore any recursive schedule() calls triggered via
9287                         # self._task_exit().
9288                         return False
9289
9290                 self._scheduling = True
9291
9292                 task_queue = self._task_queue
9293                 running_tasks = self.running_tasks
9294                 max_jobs = self.max_jobs
9295                 state_changed = False
9296
9297                 while task_queue and \
9298                         (max_jobs is True or len(running_tasks) < max_jobs):
9299                         task = task_queue.popleft()
9300                         cancelled = getattr(task, "cancelled", None)
9301                         if not cancelled:
9302                                 running_tasks.add(task)
9303                                 task.addExitListener(self._task_exit)
9304                                 task.start()
9305                         state_changed = True
9306
9307                 self._dirty = False
9308                 self._scheduling = False
9309
9310                 return state_changed
9311
9312         def _task_exit(self, task):
9313                 """
9314                 Since we can always rely on exit listeners being called, the set of
9315                 running tasks is always pruned automatically and there is never any need
9316                 to actively prune it.
9317                 """
9318                 self.running_tasks.remove(task)
9319                 if self._task_queue:
9320                         self._dirty = True
9321
9322         def clear(self):
9323                 self._task_queue.clear()
9324                 running_tasks = self.running_tasks
9325                 while running_tasks:
9326                         task = running_tasks.pop()
9327                         task.removeExitListener(self._task_exit)
9328                         task.cancel()
9329                 self._dirty = False
9330
9331         def __nonzero__(self):
9332                 return bool(self._task_queue or self.running_tasks)
9333
9334         def __len__(self):
9335                 return len(self._task_queue) + len(self.running_tasks)
9336
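# Illustrative sketch (not part of the runtime logic): SequentialTaskQueue
# expects task objects with the AsynchronousTask-style interface used above
# (start(), cancel(), addExitListener(), removeExitListener() and an optional
# "cancelled" attribute). "tasks" is a hypothetical list of such objects:
#
#   queue = SequentialTaskQueue(max_jobs=2)
#   for task in tasks:
#       queue.add(task)
#   queue.schedule()   # starts up to max_jobs tasks
#   # Completed tasks call back into _task_exit() through their exit
#   # listeners; schedule() can then be called again to start more.
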
9337 _can_poll_device = None
9338
9339 def can_poll_device():
9340         """
9341         Test if it's possible to use poll() on a device such as a pty. This
9342         is known to fail on Darwin.
9343         @rtype: bool
9344         @returns: True if poll() on a device succeeds, False otherwise.
9345         """
9346
9347         global _can_poll_device
9348         if _can_poll_device is not None:
9349                 return _can_poll_device
9350
9351         if not hasattr(select, "poll"):
9352                 _can_poll_device = False
9353                 return _can_poll_device
9354
9355         try:
9356                 dev_null = open('/dev/null', 'rb')
9357         except IOError:
9358                 _can_poll_device = False
9359                 return _can_poll_device
9360
9361         p = select.poll()
9362         p.register(dev_null.fileno(), PollConstants.POLLIN)
9363
9364         invalid_request = False
9365         for f, event in p.poll():
9366                 if event & PollConstants.POLLNVAL:
9367                         invalid_request = True
9368                         break
9369         dev_null.close()
9370
9371         _can_poll_device = not invalid_request
9372         return _can_poll_device
9373
9374 def create_poll_instance():
9375         """
9376         Create an instance of select.poll, or an instance of
9377         PollSelectAdapter there is no poll() implementation or
9378         PollSelectAdapter if there is no poll() implementation or
9379         """
9380         if can_poll_device():
9381                 return select.poll()
9382         return PollSelectAdapter()
9383
9384 getloadavg = getattr(os, "getloadavg", None)
9385 if getloadavg is None:
9386         def getloadavg():
9387                 """
9388                 Uses /proc/loadavg to emulate os.getloadavg().
9389                 Raises OSError if the load average was unobtainable.
9390                 """
9391                 try:
9392                         loadavg_str = open('/proc/loadavg').readline()
9393                 except IOError:
9394                         # getloadavg() is only supposed to raise OSError, so convert
9395                         raise OSError('unknown')
9396                 loadavg_split = loadavg_str.split()
9397                 if len(loadavg_split) < 3:
9398                         raise OSError('unknown')
9399                 loadavg_floats = []
9400                 for i in xrange(3):
9401                         try:
9402                                 loadavg_floats.append(float(loadavg_split[i]))
9403                         except ValueError:
9404                                 raise OSError('unknown')
9405                 return tuple(loadavg_floats)
9406
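# Illustrative note: a typical /proc/loadavg line looks like
#
#   0.25 1.50 2.75 1/123 4567
#
# and the fallback above returns the first three fields as floats,
# e.g. (0.25, 1.5, 2.75), matching os.getloadavg().
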
9407 class PollScheduler(object):
9408
9409         class _sched_iface_class(SlotObject):
9410                 __slots__ = ("register", "schedule", "unregister")
9411
9412         def __init__(self):
9413                 self._max_jobs = 1
9414                 self._max_load = None
9415                 self._jobs = 0
9416                 self._poll_event_queue = []
9417                 self._poll_event_handlers = {}
9418                 self._poll_event_handler_ids = {}
9419                 # Increment id for each new handler.
9420                 self._event_handler_id = 0
9421                 self._poll_obj = create_poll_instance()
9422                 self._scheduling = False
9423
9424         def _schedule(self):
9425                 """
9426                 Calls _schedule_tasks() and automatically returns early from
9427                 any recursive calls to this method that the _schedule_tasks()
9428                 call might trigger. This makes _schedule() safe to call from
9429                 inside exit listeners.
9430                 """
9431                 if self._scheduling:
9432                         return False
9433                 self._scheduling = True
9434                 try:
9435                         return self._schedule_tasks()
9436                 finally:
9437                         self._scheduling = False
9438
9439         def _running_job_count(self):
9440                 return self._jobs
9441
9442         def _can_add_job(self):
9443                 max_jobs = self._max_jobs
9444                 max_load = self._max_load
9445
9446                 if self._max_jobs is not True and \
9447                         self._running_job_count() >= self._max_jobs:
9448                         return False
9449
9450                 if max_load is not None and \
9451                         (max_jobs is True or max_jobs > 1) and \
9452                         self._running_job_count() >= 1:
9453                         try:
9454                                 avg1, avg5, avg15 = getloadavg()
9455                         except OSError:
9456                                 return False
9457
9458                         if avg1 >= max_load:
9459                                 return False
9460
9461                 return True
9462
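        # Illustrative note: with self._max_jobs = 4 and self._max_load = 3.0
        # (e.g. --jobs=4 --load-average=3.0), _can_add_job() refuses a new job
        # once four jobs are running, and also refuses a second or later job
        # while the one-minute load average is at or above 3.0.
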
9463         def _poll(self, timeout=None):
9464                 """
9465                 All poll() calls pass through here. The poll events
9466                 are added directly to self._poll_event_queue.
9467                 In order to avoid endless blocking, this raises
9468                 StopIteration if timeout is None and there are
9469                 no file descriptors to poll.
9470                 """
9471                 if not self._poll_event_handlers:
9472                         self._schedule()
9473                         if timeout is None and \
9474                                 not self._poll_event_handlers:
9475                                 raise StopIteration(
9476                                         "timeout is None and there are no poll() event handlers")
9477
9478                 # The following error is known to occur with Linux kernel versions
9479                 # less than 2.6.24:
9480                 #
9481                 #   select.error: (4, 'Interrupted system call')
9482                 #
9483                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9484                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9485                 # without any events.
9486                 while True:
9487                         try:
9488                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9489                                 break
9490                         except select.error, e:
9491                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9492                                         level=logging.ERROR, noiselevel=-1)
9493                                 del e
9494                                 if timeout is not None:
9495                                         break
9496
9497         def _next_poll_event(self, timeout=None):
9498                 """
9499                 Since the _schedule_wait() loop is called by event
9500                 handlers from _poll_loop(), maintain a central event
9501                 queue for both of them to share events from a single
9502                 poll() call. In order to avoid endless blocking, this
9503                 raises StopIteration if timeout is None and there are
9504                 no file descriptors to poll.
9505                 """
9506                 if not self._poll_event_queue:
9507                         self._poll(timeout)
9508                 return self._poll_event_queue.pop()
9509
9510         def _poll_loop(self):
9511
9512                 event_handlers = self._poll_event_handlers
9513                 event_handled = False
9514
9515                 try:
9516                         while event_handlers:
9517                                 f, event = self._next_poll_event()
9518                                 handler, reg_id = event_handlers[f]
9519                                 handler(f, event)
9520                                 event_handled = True
9521                 except StopIteration:
9522                         event_handled = True
9523
9524                 if not event_handled:
9525                         raise AssertionError("tight loop")
9526
9527         def _schedule_yield(self):
9528                 """
9529                 Schedule for a short period of time chosen by the scheduler based
9530                 on internal state. Synchronous tasks should call this periodically
9531                 in order to allow the scheduler to service pending poll events. The
9532                 scheduler will call poll() exactly once, without blocking, and any
9533                 resulting poll events will be serviced.
9534                 """
9535                 event_handlers = self._poll_event_handlers
9536                 events_handled = 0
9537
9538                 if not event_handlers:
9539                         return bool(events_handled)
9540
9541                 if not self._poll_event_queue:
9542                         self._poll(0)
9543
9544                 try:
9545                         while event_handlers and self._poll_event_queue:
9546                                 f, event = self._next_poll_event()
9547                                 handler, reg_id = event_handlers[f]
9548                                 handler(f, event)
9549                                 events_handled += 1
9550                 except StopIteration:
9551                         events_handled += 1
9552
9553                 return bool(events_handled)
9554
9555         def _register(self, f, eventmask, handler):
9556                 """
9557                 @rtype: Integer
9558                 @return: A unique registration id, for use in schedule() or
9559                         unregister() calls.
9560                 """
9561                 if f in self._poll_event_handlers:
9562                         raise AssertionError("fd %d is already registered" % f)
9563                 self._event_handler_id += 1
9564                 reg_id = self._event_handler_id
9565                 self._poll_event_handler_ids[reg_id] = f
9566                 self._poll_event_handlers[f] = (handler, reg_id)
9567                 self._poll_obj.register(f, eventmask)
9568                 return reg_id
9569
9570         def _unregister(self, reg_id):
9571                 f = self._poll_event_handler_ids[reg_id]
9572                 self._poll_obj.unregister(f)
9573                 del self._poll_event_handlers[f]
9574                 del self._poll_event_handler_ids[reg_id]
9575
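        # Illustrative sketch ("fd" and "handler" are hypothetical): callers
        # register a descriptor, receive handler(fd, event) callbacks while
        # events are serviced, and unregister the id when done:
        #
        #   reg_id = self._register(fd, PollConstants.POLLIN, handler)
        #   ...
        #   self._unregister(reg_id)
        #
        # _schedule_wait() below can be used to keep servicing events until a
        # given registration id has been unregistered.
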
9576         def _schedule_wait(self, wait_ids):
9577                 """
9578                 Schedule until none of the given ids remain registered
9579                 for poll() events.
9580                 @type wait_ids: int or collection of ints
9581                 @param wait_ids: one or more registration ids to wait for
9582                 """
9583                 event_handlers = self._poll_event_handlers
9584                 handler_ids = self._poll_event_handler_ids
9585                 event_handled = False
9586
9587                 if isinstance(wait_ids, int):
9588                         wait_ids = frozenset([wait_ids])
9589
9590                 try:
9591                         while wait_ids.intersection(handler_ids):
9592                                 f, event = self._next_poll_event()
9593                                 handler, reg_id = event_handlers[f]
9594                                 handler(f, event)
9595                                 event_handled = True
9596                 except StopIteration:
9597                         event_handled = True
9598
9599                 return event_handled
9600
9601 class QueueScheduler(PollScheduler):
9602
9603         """
9604         Add instances of SequentialTaskQueue and then call run(). The
9605         run() method returns when no tasks remain.
9606         """
9607
9608         def __init__(self, max_jobs=None, max_load=None):
9609                 PollScheduler.__init__(self)
9610
9611                 if max_jobs is None:
9612                         max_jobs = 1
9613
9614                 self._max_jobs = max_jobs
9615                 self._max_load = max_load
9616                 self.sched_iface = self._sched_iface_class(
9617                         register=self._register,
9618                         schedule=self._schedule_wait,
9619                         unregister=self._unregister)
9620
9621                 self._queues = []
9622                 self._schedule_listeners = []
9623
9624         def add(self, q):
9625                 self._queues.append(q)
9626
9627         def remove(self, q):
9628                 self._queues.remove(q)
9629
9630         def run(self):
9631
9632                 while self._schedule():
9633                         self._poll_loop()
9634
9635                 while self._running_job_count():
9636                         self._poll_loop()
9637
9638         def _schedule_tasks(self):
9639                 """
9640                 @rtype: bool
9641                 @returns: True if there may be remaining tasks to schedule,
9642                         False otherwise.
9643                 """
9644                 while self._can_add_job():
9645                         n = self._max_jobs - self._running_job_count()
9646                         if n < 1:
9647                                 break
9648
9649                         if not self._start_next_job(n):
9650                                 return False
9651
9652                 for q in self._queues:
9653                         if q:
9654                                 return True
9655                 return False
9656
9657         def _running_job_count(self):
9658                 job_count = 0
9659                 for q in self._queues:
9660                         job_count += len(q.running_tasks)
9661                 self._jobs = job_count
9662                 return job_count
9663
9664         def _start_next_job(self, n=1):
9665                 started_count = 0
9666                 for q in self._queues:
9667                         initial_job_count = len(q.running_tasks)
9668                         q.schedule()
9669                         final_job_count = len(q.running_tasks)
9670                         if final_job_count > initial_job_count:
9671                                 started_count += (final_job_count - initial_job_count)
9672                         if started_count >= n:
9673                                 break
9674                 return started_count
9675
9676 class TaskScheduler(object):
9677
9678         """
9679         A simple way to handle scheduling of AsynchronousTask instances. Add
9680         tasks and call run(); the run() method returns when no tasks remain.
9681         """
9682
9683         def __init__(self, max_jobs=None, max_load=None):
9684                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9685                 self._scheduler = QueueScheduler(
9686                         max_jobs=max_jobs, max_load=max_load)
9687                 self.sched_iface = self._scheduler.sched_iface
9688                 self.run = self._scheduler.run
9689                 self._scheduler.add(self._queue)
9690
9691         def add(self, task):
9692                 self._queue.add(task)
9693
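# Illustrative sketch (not part of the runtime logic): TaskScheduler wraps a
# single SequentialTaskQueue in a QueueScheduler. "tasks" is a hypothetical
# list of AsynchronousTask-style objects:
#
#   task_scheduler = TaskScheduler(max_jobs=2, max_load=3.0)
#   for task in tasks:
#       task_scheduler.add(task)
#   task_scheduler.run()   # returns when no tasks remain
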
9694 class JobStatusDisplay(object):
9695
9696         _bound_properties = ("curval", "failed", "running")
9697         _jobs_column_width = 48
9698
9699         # Don't update the display unless at least this much
9700         # time has passed, in units of seconds.
9701         _min_display_latency = 2
9702
9703         _default_term_codes = {
9704                 'cr'  : '\r',
9705                 'el'  : '\x1b[K',
9706                 'nel' : '\n',
9707         }
9708
9709         _termcap_name_map = {
9710                 'carriage_return' : 'cr',
9711                 'clr_eol'         : 'el',
9712                 'newline'         : 'nel',
9713         }
9714
9715         def __init__(self, out=sys.stdout, quiet=False):
9716                 object.__setattr__(self, "out", out)
9717                 object.__setattr__(self, "quiet", quiet)
9718                 object.__setattr__(self, "maxval", 0)
9719                 object.__setattr__(self, "merges", 0)
9720                 object.__setattr__(self, "_changed", False)
9721                 object.__setattr__(self, "_displayed", False)
9722                 object.__setattr__(self, "_last_display_time", 0)
9723                 object.__setattr__(self, "width", 80)
9724                 self.reset()
9725
9726                 isatty = hasattr(out, "isatty") and out.isatty()
9727                 object.__setattr__(self, "_isatty", isatty)
9728                 if not isatty or not self._init_term():
9729                         term_codes = {}
9730                         for k, capname in self._termcap_name_map.iteritems():
9731                                 term_codes[k] = self._default_term_codes[capname]
9732                         object.__setattr__(self, "_term_codes", term_codes)
9733                 encoding = sys.getdefaultencoding()
9734                 for k, v in self._term_codes.items():
9735                         if not isinstance(v, basestring):
9736                                 self._term_codes[k] = v.decode(encoding, 'replace')
9737
9738         def _init_term(self):
9739                 """
9740                 Initialize term control codes.
9741                 @rtype: bool
9742                 @returns: True if term codes were successfully initialized,
9743                         False otherwise.
9744                 """
9745
9746                 term_type = os.environ.get("TERM", "vt100")
9747                 tigetstr = None
9748
9749                 try:
9750                         import curses
9751                         try:
9752                                 curses.setupterm(term_type, self.out.fileno())
9753                                 tigetstr = curses.tigetstr
9754                         except curses.error:
9755                                 pass
9756                 except ImportError:
9757                         pass
9758
9759                 if tigetstr is None:
9760                         return False
9761
9762                 term_codes = {}
9763                 for k, capname in self._termcap_name_map.iteritems():
9764                         code = tigetstr(capname)
9765                         if code is None:
9766                                 code = self._default_term_codes[capname]
9767                         term_codes[k] = code
9768                 object.__setattr__(self, "_term_codes", term_codes)
9769                 return True
9770
9771         def _format_msg(self, msg):
9772                 return ">>> %s" % msg
9773
9774         def _erase(self):
9775                 self.out.write(
9776                         self._term_codes['carriage_return'] + \
9777                         self._term_codes['clr_eol'])
9778                 self.out.flush()
9779                 self._displayed = False
9780
9781         def _display(self, line):
9782                 self.out.write(line)
9783                 self.out.flush()
9784                 self._displayed = True
9785
9786         def _update(self, msg):
9787
9788                 out = self.out
9789                 if not self._isatty:
9790                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9791                         self.out.flush()
9792                         self._displayed = True
9793                         return
9794
9795                 if self._displayed:
9796                         self._erase()
9797
9798                 self._display(self._format_msg(msg))
9799
9800         def displayMessage(self, msg):
9801
9802                 was_displayed = self._displayed
9803
9804                 if self._isatty and self._displayed:
9805                         self._erase()
9806
9807                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9808                 self.out.flush()
9809                 self._displayed = False
9810
9811                 if was_displayed:
9812                         self._changed = True
9813                         self.display()
9814
9815         def reset(self):
9816                 self.maxval = 0
9817                 self.merges = 0
9818                 for name in self._bound_properties:
9819                         object.__setattr__(self, name, 0)
9820
9821                 if self._displayed:
9822                         self.out.write(self._term_codes['newline'])
9823                         self.out.flush()
9824                         self._displayed = False
9825
9826         def __setattr__(self, name, value):
9827                 old_value = getattr(self, name)
9828                 if value == old_value:
9829                         return
9830                 object.__setattr__(self, name, value)
9831                 if name in self._bound_properties:
9832                         self._property_change(name, old_value, value)
9833
9834         def _property_change(self, name, old_value, new_value):
9835                 self._changed = True
9836                 self.display()
9837
9838         def _load_avg_str(self):
9839                 try:
9840                         avg = getloadavg()
9841                 except OSError:
9842                         return 'unknown'
9843
9844                 max_avg = max(avg)
9845
9846                 if max_avg < 10:
9847                         digits = 2
9848                 elif max_avg < 100:
9849                         digits = 1
9850                 else:
9851                         digits = 0
9852
9853                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9854
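        # Illustrative example: for a load average of (0.25, 1.5, 2.75) the
        # largest value is below 10, so two decimal places are used and
        # _load_avg_str() returns "0.25, 1.50, 2.75".
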
9855         def display(self):
9856                 """
9857                 Display status on stdout, but only if something has
9858                 changed since the last call.
9859                 """
9860
9861                 if self.quiet:
9862                         return
9863
9864                 current_time = time.time()
9865                 time_delta = current_time - self._last_display_time
9866                 if self._displayed and \
9867                         not self._changed:
9868                         if not self._isatty:
9869                                 return
9870                         if time_delta < self._min_display_latency:
9871                                 return
9872
9873                 self._last_display_time = current_time
9874                 self._changed = False
9875                 self._display_status()
9876
9877         def _display_status(self):
9878                 # Don't use len(self._completed_tasks) here since that also
9879                 # can include uninstall tasks.
9880                 curval_str = str(self.curval)
9881                 maxval_str = str(self.maxval)
9882                 running_str = str(self.running)
9883                 failed_str = str(self.failed)
9884                 load_avg_str = self._load_avg_str()
9885
9886                 color_output = StringIO()
9887                 plain_output = StringIO()
9888                 style_file = portage.output.ConsoleStyleFile(color_output)
9889                 style_file.write_listener = plain_output
9890                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9891                 style_writer.style_listener = style_file.new_styles
9892                 f = formatter.AbstractFormatter(style_writer)
9893
9894                 number_style = "INFORM"
9895                 f.add_literal_data("Jobs: ")
9896                 f.push_style(number_style)
9897                 f.add_literal_data(curval_str)
9898                 f.pop_style()
9899                 f.add_literal_data(" of ")
9900                 f.push_style(number_style)
9901                 f.add_literal_data(maxval_str)
9902                 f.pop_style()
9903                 f.add_literal_data(" complete")
9904
9905                 if self.running:
9906                         f.add_literal_data(", ")
9907                         f.push_style(number_style)
9908                         f.add_literal_data(running_str)
9909                         f.pop_style()
9910                         f.add_literal_data(" running")
9911
9912                 if self.failed:
9913                         f.add_literal_data(", ")
9914                         f.push_style(number_style)
9915                         f.add_literal_data(failed_str)
9916                         f.pop_style()
9917                         f.add_literal_data(" failed")
9918
9919                 padding = self._jobs_column_width - len(plain_output.getvalue())
9920                 if padding > 0:
9921                         f.add_literal_data(padding * " ")
9922
9923                 f.add_literal_data("Load avg: ")
9924                 f.add_literal_data(load_avg_str)
9925
9926                 # Truncate to fit width, to avoid making the terminal scroll if the
9927                 # line overflows (happens when the load average is large).
9928                 plain_output = plain_output.getvalue()
9929                 if self._isatty and len(plain_output) > self.width:
9930                         # Use plain_output here since it's easier to truncate
9931                         # properly than the color output which contains console
9932                         # color codes.
9933                         self._update(plain_output[:self.width])
9934                 else:
9935                         self._update(color_output.getvalue())
9936
9937                 xtermTitle(" ".join(plain_output.split()))
9938
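# Illustrative sketch (not part of the runtime logic): with curval=1,
# maxval=5, running=1, failed=0 and a load average of (0.25, 1.5, 2.75),
# JobStatusDisplay._display_status() above emits a status line along the
# lines of
#
#   >>> Jobs: 1 of 5 complete, 1 running                Load avg: 0.25, 1.50, 2.75
#
# padded with spaces so that the load average column stays aligned between
# updates.
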
9939 class Scheduler(PollScheduler):
9940
9941         _opts_ignore_blockers = \
9942                 frozenset(["--buildpkgonly",
9943                 "--fetchonly", "--fetch-all-uri",
9944                 "--nodeps", "--pretend"])
9945
9946         _opts_no_background = \
9947                 frozenset(["--pretend",
9948                 "--fetchonly", "--fetch-all-uri"])
9949
9950         _opts_no_restart = frozenset(["--buildpkgonly",
9951                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9952
9953         _bad_resume_opts = set(["--ask", "--changelog",
9954                 "--resume", "--skipfirst"])
9955
9956         _fetch_log = "/var/log/emerge-fetch.log"
9957
9958         class _iface_class(SlotObject):
9959                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9960                         "dblinkElog", "fetch", "register", "schedule",
9961                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9962                         "unregister")
9963
9964         class _fetch_iface_class(SlotObject):
9965                 __slots__ = ("log_file", "schedule")
9966
9967         _task_queues_class = slot_dict_class(
9968                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9969
9970         class _build_opts_class(SlotObject):
9971                 __slots__ = ("buildpkg", "buildpkgonly",
9972                         "fetch_all_uri", "fetchonly", "pretend")
9973
9974         class _binpkg_opts_class(SlotObject):
9975                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9976
9977         class _pkg_count_class(SlotObject):
9978                 __slots__ = ("curval", "maxval")
9979
9980         class _emerge_log_class(SlotObject):
9981                 __slots__ = ("xterm_titles",)
9982
9983                 def log(self, *pargs, **kwargs):
9984                         if not self.xterm_titles:
9985                                 # Avoid interference with the scheduler's status display.
9986                                 kwargs.pop("short_msg", None)
9987                         emergelog(self.xterm_titles, *pargs, **kwargs)
9988
9989         class _failed_pkg(SlotObject):
9990                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9991
9992         class _ConfigPool(object):
9993                 """Interface for a task to temporarily allocate a config
9994                 instance from a pool. This allows a task to be constructed
9995                 long before the config instance actually becomes needed, like
9996                 when prefetchers are constructed for the whole merge list."""
9997                 __slots__ = ("_root", "_allocate", "_deallocate")
9998                 def __init__(self, root, allocate, deallocate):
9999                         self._root = root
10000                         self._allocate = allocate
10001                         self._deallocate = deallocate
10002                 def allocate(self):
10003                         return self._allocate(self._root)
10004                 def deallocate(self, settings):
10005                         self._deallocate(settings)
10006
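        # Illustrative sketch: a task holding one of these pool objects is
        # expected to bracket its work with allocate()/deallocate(), e.g.
        #
        #   settings = config_pool.allocate()
        #   try:
        #       ...  # use the temporarily allocated config instance
        #   finally:
        #       config_pool.deallocate(settings)
        #
        # where "config_pool" is the hypothetical name of the _ConfigPool
        # instance handed to the task.
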
10007         class _unknown_internal_error(portage.exception.PortageException):
10008                 """
10009                 Used internally to terminate scheduling. The specific reason for
10010                 the failure should have been dumped to stderr.
10011                 """
10012                 def __init__(self, value=""):
10013                         portage.exception.PortageException.__init__(self, value)
10014
10015         def __init__(self, settings, trees, mtimedb, myopts,
10016                 spinner, mergelist, favorites, digraph):
10017                 PollScheduler.__init__(self)
10018                 self.settings = settings
10019                 self.target_root = settings["ROOT"]
10020                 self.trees = trees
10021                 self.myopts = myopts
10022                 self._spinner = spinner
10023                 self._mtimedb = mtimedb
10024                 self._mergelist = mergelist
10025                 self._favorites = favorites
10026                 self._args_set = InternalPackageSet(favorites)
10027                 self._build_opts = self._build_opts_class()
10028                 for k in self._build_opts.__slots__:
10029                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10030                 self._binpkg_opts = self._binpkg_opts_class()
10031                 for k in self._binpkg_opts.__slots__:
10032                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10033
10034                 self.curval = 0
10035                 self._logger = self._emerge_log_class()
10036                 self._task_queues = self._task_queues_class()
10037                 for k in self._task_queues.allowed_keys:
10038                         setattr(self._task_queues, k,
10039                                 SequentialTaskQueue())
10040
10041                 # Holds merges that will wait to be executed when no builds are
10042                 # executing. This is useful for system packages since dependencies
10043                 # on system packages are frequently unspecified.
10044                 self._merge_wait_queue = []
10045                 # Holds merges that have been transferred from the merge_wait_queue to
10046                 # the actual merge queue. They are removed from this list upon
10047                 # completion. Other packages can start building only when this list is
10048                 # empty.
10049                 self._merge_wait_scheduled = []
10050
10051                 # Holds system packages and their deep runtime dependencies. Before
10052                 # being merged, these packages go to merge_wait_queue, to be merged
10053                 # when no other packages are building.
10054                 self._deep_system_deps = set()
10055
10056                 # Holds packages to merge which will satisfy currently unsatisfied
10057                 # deep runtime dependencies of system packages. If this is not empty
10058                 # then no parallel builds will be spawned until it is empty. This
10059                 # minimizes the possibility that a build will fail due to the system
10060                 # being in a fragile state. For example, see bug #259954.
10061                 self._unsatisfied_system_deps = set()
10062
10063                 self._status_display = JobStatusDisplay()
10064                 self._max_load = myopts.get("--load-average")
10065                 max_jobs = myopts.get("--jobs")
10066                 if max_jobs is None:
10067                         max_jobs = 1
10068                 self._set_max_jobs(max_jobs)
10069
10070                 # The root where the currently running
10071                 # portage instance is installed.
10072                 self._running_root = trees["/"]["root_config"]
10073                 self.edebug = 0
10074                 if settings.get("PORTAGE_DEBUG", "") == "1":
10075                         self.edebug = 1
10076                 self.pkgsettings = {}
10077                 self._config_pool = {}
10078                 self._blocker_db = {}
10079                 for root in trees:
10080                         self._config_pool[root] = []
10081                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10082
10083                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10084                         schedule=self._schedule_fetch)
10085                 self._sched_iface = self._iface_class(
10086                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10087                         dblinkDisplayMerge=self._dblink_display_merge,
10088                         dblinkElog=self._dblink_elog,
10089                         fetch=fetch_iface, register=self._register,
10090                         schedule=self._schedule_wait,
10091                         scheduleSetup=self._schedule_setup,
10092                         scheduleUnpack=self._schedule_unpack,
10093                         scheduleYield=self._schedule_yield,
10094                         unregister=self._unregister)
10095
10096                 self._prefetchers = weakref.WeakValueDictionary()
10097                 self._pkg_queue = []
10098                 self._completed_tasks = set()
10099
10100                 self._failed_pkgs = []
10101                 self._failed_pkgs_all = []
10102                 self._failed_pkgs_die_msgs = []
10103                 self._post_mod_echo_msgs = []
10104                 self._parallel_fetch = False
10105                 merge_count = len([x for x in mergelist \
10106                         if isinstance(x, Package) and x.operation == "merge"])
10107                 self._pkg_count = self._pkg_count_class(
10108                         curval=0, maxval=merge_count)
10109                 self._status_display.maxval = self._pkg_count.maxval
10110
10111                 # The load average takes some time to respond when new
10112                 # jobs are added, so we need to limit the rate of adding
10113                 # new jobs.
10114                 self._job_delay_max = 10
10115                 self._job_delay_factor = 1.0
10116                 self._job_delay_exp = 1.5
10117                 self._previous_job_start_time = None
10118
10119                 self._set_digraph(digraph)
10120
10121                 # This is used to memoize the _choose_pkg() result when
10122                 # no packages can be chosen until one of the existing
10123                 # jobs completes.
10124                 self._choose_pkg_return_early = False
10125
10126                 features = self.settings.features
10127                 if "parallel-fetch" in features and \
10128                         not ("--pretend" in self.myopts or \
10129                         "--fetch-all-uri" in self.myopts or \
10130                         "--fetchonly" in self.myopts):
10131                         if "distlocks" not in features:
10132                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10133                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10134                                         "requires the distlocks feature enabled"+"\n",
10135                                         noiselevel=-1)
10136                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10137                                         "thus parallel-fetching is being disabled"+"\n",
10138                                         noiselevel=-1)
10139                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10140                         elif len(mergelist) > 1:
10141                                 self._parallel_fetch = True
10142
10143                 if self._parallel_fetch:
10144                         # clear out existing fetch log if it exists
10145                         try:
10146                                 open(self._fetch_log, 'w')
10147                         except EnvironmentError:
10148                                 pass
10149
10150                 self._running_portage = None
10151                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10152                         portage.const.PORTAGE_PACKAGE_ATOM)
10153                 if portage_match:
10154                         cpv = portage_match.pop()
10155                         self._running_portage = self._pkg(cpv, "installed",
10156                                 self._running_root, installed=True)
10157
10158         def _poll(self, timeout=None):
10159                 self._schedule()
10160                 PollScheduler._poll(self, timeout=timeout)
10161
10162         def _set_max_jobs(self, max_jobs):
10163                 self._max_jobs = max_jobs
10164                 self._task_queues.jobs.max_jobs = max_jobs
10165
10166         def _background_mode(self):
10167                 """
10168                 Check if background mode is enabled and adjust states as necessary.
10169
10170                 @rtype: bool
10171                 @returns: True if background mode is enabled, False otherwise.
10172                 """
10173                 background = (self._max_jobs is True or \
10174                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10175                         not bool(self._opts_no_background.intersection(self.myopts))
10176
10177                 if background:
10178                         interactive_tasks = self._get_interactive_tasks()
10179                         if interactive_tasks:
10180                                 background = False
10181                                 writemsg_level(">>> Sending package output to stdio due " + \
10182                                         "to interactive package(s):\n",
10183                                         level=logging.INFO, noiselevel=-1)
10184                                 msg = [""]
10185                                 for pkg in interactive_tasks:
10186                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10187                                         if pkg.root != "/":
10188                                                 pkg_str += " for " + pkg.root
10189                                         msg.append(pkg_str)
10190                                 msg.append("")
10191                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10192                                         level=logging.INFO, noiselevel=-1)
10193                                 if self._max_jobs is True or self._max_jobs > 1:
10194                                         self._set_max_jobs(1)
10195                                         writemsg_level(">>> Setting --jobs=1 due " + \
10196                                                 "to the above interactive package(s)\n",
10197                                                 level=logging.INFO, noiselevel=-1)
10198
10199                 self._status_display.quiet = \
10200                         not background or \
10201                         ("--quiet" in self.myopts and \
10202                         "--verbose" not in self.myopts)
10203
10204                 self._logger.xterm_titles = \
10205                         "notitles" not in self.settings.features and \
10206                         self._status_display.quiet
10207
10208                 return background
10209
10210         def _get_interactive_tasks(self):
10211                 from portage import flatten
10212                 from portage.dep import use_reduce, paren_reduce
10213                 interactive_tasks = []
10214                 for task in self._mergelist:
10215                         if not (isinstance(task, Package) and \
10216                                 task.operation == "merge"):
10217                                 continue
10218                         try:
10219                                 properties = flatten(use_reduce(paren_reduce(
10220                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10221                         except portage.exception.InvalidDependString, e:
10222                                 show_invalid_depstring_notice(task,
10223                                         task.metadata["PROPERTIES"], str(e))
10224                                 raise self._unknown_internal_error()
10225                         if "interactive" in properties:
10226                                 interactive_tasks.append(task)
10227                 return interactive_tasks
10228
10229         def _set_digraph(self, digraph):
10230                 if "--nodeps" in self.myopts or \
10231                         (self._max_jobs is not True and self._max_jobs < 2):
10232                         # save some memory
10233                         self._digraph = None
10234                         return
10235
10236                 self._digraph = digraph
10237                 self._find_system_deps()
10238                 self._prune_digraph()
10239                 self._prevent_builddir_collisions()
10240
10241         def _find_system_deps(self):
10242                 """
10243                 Find system packages and their deep runtime dependencies. Before being
10244                 merged, these packages go to merge_wait_queue, to be merged when no
10245                 other packages are building.
10246                 """
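                      # Only packages actually scheduled for merge are kept; any
                      # nodes whose operation is not "merge" are filtered back out
                      # below.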
10247                 deep_system_deps = self._deep_system_deps
10248                 deep_system_deps.clear()
10249                 deep_system_deps.update(
10250                         _find_deep_system_runtime_deps(self._digraph))
10251                 deep_system_deps.difference_update([pkg for pkg in \
10252                         deep_system_deps if pkg.operation != "merge"])
10253
10254         def _prune_digraph(self):
10255                 """
10256                 Prune any root nodes that are irrelevant.
10257                 """
10258
10259                 graph = self._digraph
10260                 completed_tasks = self._completed_tasks
10261                 removed_nodes = set()
10262                 while True:
10263                         for node in graph.root_nodes():
10264                                 if not isinstance(node, Package) or \
10265                                         (node.installed and node.operation == "nomerge") or \
10266                                         node.onlydeps or \
10267                                         node in completed_tasks:
10268                                         removed_nodes.add(node)
10269                         if removed_nodes:
10270                                 graph.difference_update(removed_nodes)
10271                         if not removed_nodes:
10272                                 break
10273                         removed_nodes.clear()
10274
10275         def _prevent_builddir_collisions(self):
10276                 """
10277                 When building stages, sometimes the same exact cpv needs to be merged
10278                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10279                 in the builddir. Currently, normal file locks would be inappropriate
10280                 for this purpose since emerge holds all of its build dir locks from
10281                 the main process.
10282                 """
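                      # Hypothetical example: if the mergelist contains the same
                      # sys-libs/glibc-2.9 once for ROOT=/ and once for a stage
                      # ROOT, an edge is added so the later occurrence builds only
                      # after the earlier one, keeping them out of the shared
                      # builddir at the same time.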
10283                 cpv_map = {}
10284                 for pkg in self._mergelist:
10285                         if not isinstance(pkg, Package):
10286                                 # a satisfied blocker
10287                                 continue
10288                         if pkg.installed:
10289                                 continue
10290                         if pkg.cpv not in cpv_map:
10291                                 cpv_map[pkg.cpv] = [pkg]
10292                                 continue
10293                         for earlier_pkg in cpv_map[pkg.cpv]:
10294                                 self._digraph.add(earlier_pkg, pkg,
10295                                         priority=DepPriority(buildtime=True))
10296                         cpv_map[pkg.cpv].append(pkg)
10297
10298         class _pkg_failure(portage.exception.PortageException):
10299                 """
10300                 An instance of this class is raised by unmerge() when
10301                 an uninstallation fails.
10302                 """
10303                 status = 1
10304                 def __init__(self, *pargs):
10305                         portage.exception.PortageException.__init__(self, pargs)
10306                         if pargs:
10307                                 self.status = pargs[0]
10308
10309         def _schedule_fetch(self, fetcher):
10310                 """
10311                 Schedule a fetcher on the fetch queue, in order to
10312                 serialize access to the fetch log.
10313                 """
10314                 self._task_queues.fetch.addFront(fetcher)
10315
10316         def _schedule_setup(self, setup_phase):
10317                 """
10318                 Schedule a setup phase on the merge queue, in order to
10319                 serialize unsandboxed access to the live filesystem.
10320                 """
10321                 self._task_queues.merge.addFront(setup_phase)
10322                 self._schedule()
10323
10324         def _schedule_unpack(self, unpack_phase):
10325                 """
10326                 Schedule an unpack phase on the unpack queue, in order
10327                 to serialize $DISTDIR access for live ebuilds.
10328                 """
10329                 self._task_queues.unpack.add(unpack_phase)
10330
10331         def _find_blockers(self, new_pkg):
10332                 """
10333                 Returns a callable which should be called only when
10334                 the vdb lock has been acquired.
10335                 """
10336                 def get_blockers():
10337                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10338                 return get_blockers
10339
10340         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10341                 if self._opts_ignore_blockers.intersection(self.myopts):
10342                         return None
10343
10344                 # Call gc.collect() here to avoid heap overflow that
10345                 # triggers 'Cannot allocate memory' errors (reported
10346                 # with python-2.5).
10347                 import gc
10348                 gc.collect()
10349
10350                 blocker_db = self._blocker_db[new_pkg.root]
10351
10352                 blocker_dblinks = []
10353                 for blocking_pkg in blocker_db.findInstalledBlockers(
10354                         new_pkg, acquire_lock=acquire_lock):
10355                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10356                                 continue
10357                         if new_pkg.cpv == blocking_pkg.cpv:
10358                                 continue
10359                         blocker_dblinks.append(portage.dblink(
10360                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10361                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10362                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10363
10364                 gc.collect()
10365
10366                 return blocker_dblinks
10367
10368         def _dblink_pkg(self, pkg_dblink):
10369                 cpv = pkg_dblink.mycpv
10370                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10371                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10372                 installed = type_name == "installed"
10373                 return self._pkg(cpv, type_name, root_config, installed=installed)
10374
10375         def _append_to_log_path(self, log_path, msg):
10376                 f = open(log_path, 'a')
10377                 try:
10378                         f.write(msg)
10379                 finally:
10380                         f.close()
10381
10382         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10383
10384                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10385                 log_file = None
10386                 out = sys.stdout
10387                 background = self._background
10388
10389                 if background and log_path is not None:
10390                         log_file = open(log_path, 'a')
10391                         out = log_file
10392
10393                 try:
10394                         for msg in msgs:
10395                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10396                 finally:
10397                         if log_file is not None:
10398                                 log_file.close()
10399
10400         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10401                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10402                 background = self._background
10403
10404                 if log_path is None:
10405                         if not (background and level < logging.WARN):
10406                                 portage.util.writemsg_level(msg,
10407                                         level=level, noiselevel=noiselevel)
10408                 else:
10409                         if not background:
10410                                 portage.util.writemsg_level(msg,
10411                                         level=level, noiselevel=noiselevel)
10412                         self._append_to_log_path(log_path, msg)
10413
10414         def _dblink_ebuild_phase(self,
10415                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10416                 """
10417                 Using this callback for merge phases allows the scheduler
10418                 to run while these phases execute asynchronously, and allows
10419                 the scheduler to control output handling.
10420                 """
10421
10422                 scheduler = self._sched_iface
10423                 settings = pkg_dblink.settings
10424                 pkg = self._dblink_pkg(pkg_dblink)
10425                 background = self._background
10426                 log_path = settings.get("PORTAGE_LOG_FILE")
10427
10428                 ebuild_phase = EbuildPhase(background=background,
10429                         pkg=pkg, phase=phase, scheduler=scheduler,
10430                         settings=settings, tree=pkg_dblink.treetype)
10431                 ebuild_phase.start()
10432                 ebuild_phase.wait()
10433
10434                 return ebuild_phase.returncode
10435
10436         def _generate_digests(self):
10437                 """
10438                 Generate digests if necessary for --digest or FEATURES=digest.
10439                 In order to avoid interference, this must be done before parallel
10440                 tasks are started.
10441                 """
10442
10443                 if '--fetchonly' in self.myopts:
10444                         return os.EX_OK
10445
10446                 digest = '--digest' in self.myopts
10447                 if not digest:
10448                         for pkgsettings in self.pkgsettings.itervalues():
10449                                 if 'digest' in pkgsettings.features:
10450                                         digest = True
10451                                         break
10452
10453                 if not digest:
10454                         return os.EX_OK
10455
10456                 for x in self._mergelist:
10457                         if not isinstance(x, Package) or \
10458                                 x.type_name != 'ebuild' or \
10459                                 x.operation != 'merge':
10460                                 continue
10461                         pkgsettings = self.pkgsettings[x.root]
10462                         if '--digest' not in self.myopts and \
10463                                 'digest' not in pkgsettings.features:
10464                                 continue
10465                         portdb = x.root_config.trees['porttree'].dbapi
10466                         ebuild_path = portdb.findname(x.cpv)
10467                         if not ebuild_path:
10468                                 writemsg_level(
10469                                         "!!! Could not locate ebuild for '%s'.\n" \
10470                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10471                                 return 1
10472                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10473                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10474                                 writemsg_level(
10475                                         "!!! Unable to generate manifest for '%s'.\n" \
10476                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10477                                 return 1
10478
10479                 return os.EX_OK
10480
10481         def _check_manifests(self):
10482                 # Verify all the manifests now so that the user is notified of failure
10483                 # as soon as possible.
10484                 if "strict" not in self.settings.features or \
10485                         "--fetchonly" in self.myopts or \
10486                         "--fetch-all-uri" in self.myopts:
10487                         return os.EX_OK
10488
10489                 shown_verifying_msg = False
10490                 quiet_settings = {}
10491                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10492                         quiet_config = portage.config(clone=pkgsettings)
10493                         quiet_config["PORTAGE_QUIET"] = "1"
10494                         quiet_config.backup_changes("PORTAGE_QUIET")
10495                         quiet_settings[myroot] = quiet_config
10496                         del quiet_config
10497
10498                 for x in self._mergelist:
10499                         if not isinstance(x, Package) or \
10500                                 x.type_name != "ebuild":
10501                                 continue
10502
10503                         if not shown_verifying_msg:
10504                                 shown_verifying_msg = True
10505                                 self._status_msg("Verifying ebuild manifests")
10506
10507                         root_config = x.root_config
10508                         portdb = root_config.trees["porttree"].dbapi
10509                         quiet_config = quiet_settings[root_config.root]
10510                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10511                         if not portage.digestcheck([], quiet_config, strict=True):
10512                                 return 1
10513
10514                 return os.EX_OK
10515
10516         def _add_prefetchers(self):
10517
10518                 if not self._parallel_fetch:
10519                         return
10520
10521                 if self._parallel_fetch:
10522                         self._status_msg("Starting parallel fetch")
10523
10524                         prefetchers = self._prefetchers
10525                         getbinpkg = "--getbinpkg" in self.myopts
10526
10527                         # In order to avoid "waiting for lock" messages
10528                         # at the beginning, which annoy users, never
10529                         # spawn a prefetcher for the first package.
10530                         for pkg in self._mergelist[1:]:
10531                                 prefetcher = self._create_prefetcher(pkg)
10532                                 if prefetcher is not None:
10533                                         self._task_queues.fetch.add(prefetcher)
10534                                         prefetchers[pkg] = prefetcher
10535
10536         def _create_prefetcher(self, pkg):
10537                 """
10538                 @return: a prefetcher, or None if not applicable
10539                 """
10540                 prefetcher = None
10541
10542                 if not isinstance(pkg, Package):
10543                         pass
10544
10545                 elif pkg.type_name == "ebuild":
10546
10547                         prefetcher = EbuildFetcher(background=True,
10548                                 config_pool=self._ConfigPool(pkg.root,
10549                                 self._allocate_config, self._deallocate_config),
10550                                 fetchonly=1, logfile=self._fetch_log,
10551                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10552
10553                 elif pkg.type_name == "binary" and \
10554                         "--getbinpkg" in self.myopts and \
10555                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10556
10557                         prefetcher = BinpkgPrefetcher(background=True,
10558                                 pkg=pkg, scheduler=self._sched_iface)
10559
10560                 return prefetcher
10561
10562         def _is_restart_scheduled(self):
10563                 """
10564                 Check if the merge list contains a replacement
10565                 for the currently running instance that will result
10566                 in a restart after it is merged.
10567                 @rtype: bool
10568                 @returns: True if a restart is scheduled, False otherwise.
10569                 """
10570                 if self._opts_no_restart.intersection(self.myopts):
10571                         return False
10572
10573                 mergelist = self._mergelist
10574
10575                 for i, pkg in enumerate(mergelist):
10576                         if self._is_restart_necessary(pkg) and \
10577                                 i != len(mergelist) - 1:
10578                                 return True
10579
10580                 return False
10581
10582         def _is_restart_necessary(self, pkg):
10583                 """
10584                 @return: True if merging the given package
10585                         requires restart, False otherwise.
10586                 """
10587
10588                 # Figure out if we need a restart.
10589                 if pkg.root == self._running_root.root and \
10590                         portage.match_from_list(
10591                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10592                         if self._running_portage:
10593                                 return pkg.cpv != self._running_portage.cpv
10594                         return True
10595                 return False
10596
10597         def _restart_if_necessary(self, pkg):
10598                 """
10599                 Use execv() to restart emerge. This happens
10600                 if portage upgrades itself and there are
10601                 remaining packages in the list.
10602                 """
10603
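                      # Bail out early when restart was disabled via options, when
                      # the merged package is not a new portage for the running
                      # ROOT, or when it is the last entry in the mergelist.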
10604                 if self._opts_no_restart.intersection(self.myopts):
10605                         return
10606
10607                 if not self._is_restart_necessary(pkg):
10608                         return
10609
10610                 if pkg == self._mergelist[-1]:
10611                         return
10612
10613                 self._main_loop_cleanup()
10614
10615                 logger = self._logger
10616                 pkg_count = self._pkg_count
10617                 mtimedb = self._mtimedb
10618                 bad_resume_opts = self._bad_resume_opts
10619
10620                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10621                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10622
10623                 logger.log(" *** RESTARTING " + \
10624                         "emerge via exec() after change of " + \
10625                         "portage version.")
10626
10627                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10628                 mtimedb.commit()
10629                 portage.run_exitfuncs()
10630                 mynewargv = [sys.argv[0], "--resume"]
10631                 resume_opts = self.myopts.copy()
10632                 # For automatic resume, we need to prevent
10633                 # any of bad_resume_opts from leaking in
10634                 # via EMERGE_DEFAULT_OPTS.
10635                 resume_opts["--ignore-default-opts"] = True
10636                 for myopt, myarg in resume_opts.iteritems():
10637                         if myopt not in bad_resume_opts:
10638                                 if myarg is True:
10639                                         mynewargv.append(myopt)
10640                                 else:
10641                                         mynewargv.append(myopt +"="+ str(myarg))
10642                 # priority only needs to be adjusted on the first run
10643                 os.environ["PORTAGE_NICENESS"] = "0"
10644                 os.execv(mynewargv[0], mynewargv)
10645
10646         def merge(self):
10647
10648                 if "--resume" in self.myopts:
10649                         # We're resuming.
10650                         portage.writemsg_stdout(
10651                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10652                         self._logger.log(" *** Resuming merge...")
10653
10654                 self._save_resume_list()
10655
10656                 try:
10657                         self._background = self._background_mode()
10658                 except self._unknown_internal_error:
10659                         return 1
10660
10661                 for root in self.trees:
10662                         root_config = self.trees[root]["root_config"]
10663
10664                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10665                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10666                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10667                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10668                         if not tmpdir or not os.path.isdir(tmpdir):
10669                                 msg = "The directory specified in your " + \
10670                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10671                                         "does not exist. Please create this " + \
10672                                         "directory or correct your PORTAGE_TMPDIR setting."
10673                                 msg = textwrap.wrap(msg, 70)
10674                                 out = portage.output.EOutput()
10675                                 for l in msg:
10676                                         out.eerror(l)
10677                                 return 1
10678
10679                         if self._background:
10680                                 root_config.settings.unlock()
10681                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10682                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10683                                 root_config.settings.lock()
10684
10685                         self.pkgsettings[root] = portage.config(
10686                                 clone=root_config.settings)
10687
10688                 rval = self._generate_digests()
10689                 if rval != os.EX_OK:
10690                         return rval
10691
10692                 rval = self._check_manifests()
10693                 if rval != os.EX_OK:
10694                         return rval
10695
10696                 keep_going = "--keep-going" in self.myopts
10697                 fetchonly = self._build_opts.fetchonly
10698                 mtimedb = self._mtimedb
10699                 failed_pkgs = self._failed_pkgs
10700
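                      # --keep-going loop: after a failed run, drop the failed
                      # packages from the resume mergelist, recalculate the
                      # resume depgraph and retry with whatever is left.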
10701                 while True:
10702                         rval = self._merge()
10703                         if rval == os.EX_OK or fetchonly or not keep_going:
10704                                 break
10705                         if "resume" not in mtimedb:
10706                                 break
10707                         mergelist = self._mtimedb["resume"].get("mergelist")
10708                         if not mergelist:
10709                                 break
10710
10711                         if not failed_pkgs:
10712                                 break
10713
10714                         for failed_pkg in failed_pkgs:
10715                                 mergelist.remove(list(failed_pkg.pkg))
10716
10717                         self._failed_pkgs_all.extend(failed_pkgs)
10718                         del failed_pkgs[:]
10719
10720                         if not mergelist:
10721                                 break
10722
10723                         if not self._calc_resume_list():
10724                                 break
10725
10726                         clear_caches(self.trees)
10727                         if not self._mergelist:
10728                                 break
10729
10730                         self._save_resume_list()
10731                         self._pkg_count.curval = 0
10732                         self._pkg_count.maxval = len([x for x in self._mergelist \
10733                                 if isinstance(x, Package) and x.operation == "merge"])
10734                         self._status_display.maxval = self._pkg_count.maxval
10735
10736                 self._logger.log(" *** Finished. Cleaning up...")
10737
10738                 if failed_pkgs:
10739                         self._failed_pkgs_all.extend(failed_pkgs)
10740                         del failed_pkgs[:]
10741
10742                 background = self._background
10743                 failure_log_shown = False
10744                 if background and len(self._failed_pkgs_all) == 1:
10745                         # If only one package failed then just show its
10746                         # whole log for easy viewing.
10747                         failed_pkg = self._failed_pkgs_all[-1]
10748                         build_dir = failed_pkg.build_dir
10749                         log_file = None
10750
10751                         log_paths = [failed_pkg.build_log]
10752
10753                         log_path = self._locate_failure_log(failed_pkg)
10754                         if log_path is not None:
10755                                 try:
10756                                         log_file = open(log_path)
10757                                 except IOError:
10758                                         pass
10759
10760                         if log_file is not None:
10761                                 try:
10762                                         for line in log_file:
10763                                                 writemsg_level(line, noiselevel=-1)
10764                                 finally:
10765                                         log_file.close()
10766                                 failure_log_shown = True
10767
10768                 # Dump mod_echo output now since it tends to flood the terminal.
10769                 # This allows us to avoid having more important output, generated
10770                 # later, from being swept away by the mod_echo output.
10771                 mod_echo_output = _flush_elog_mod_echo()
10772
10773                 if background and not failure_log_shown and \
10774                         self._failed_pkgs_all and \
10775                         self._failed_pkgs_die_msgs and \
10776                         not mod_echo_output:
10777
10778                         printer = portage.output.EOutput()
10779                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10780                                 root_msg = ""
10781                                 if mysettings["ROOT"] != "/":
10782                                         root_msg = " merged to %s" % mysettings["ROOT"]
10783                                 print
10784                                 printer.einfo("Error messages for package %s%s:" % \
10785                                         (colorize("INFORM", key), root_msg))
10786                                 print
10787                                 for phase in portage.const.EBUILD_PHASES:
10788                                         if phase not in logentries:
10789                                                 continue
10790                                         for msgtype, msgcontent in logentries[phase]:
10791                                                 if isinstance(msgcontent, basestring):
10792                                                         msgcontent = [msgcontent]
10793                                                 for line in msgcontent:
10794                                                         printer.eerror(line.strip("\n"))
10795
10796                 if self._post_mod_echo_msgs:
10797                         for msg in self._post_mod_echo_msgs:
10798                                 msg()
10799
10800                 if len(self._failed_pkgs_all) > 1 or \
10801                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10802                         if len(self._failed_pkgs_all) > 1:
10803                                 msg = "The following %d packages have " % \
10804                                         len(self._failed_pkgs_all) + \
10805                                         "failed to build or install:"
10806                         else:
10807                                 msg = "The following package has " + \
10808                                         "failed to build or install:"
10809                         prefix = bad(" * ")
10810                         writemsg(prefix + "\n", noiselevel=-1)
10811                         from textwrap import wrap
10812                         for line in wrap(msg, 72):
10813                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10814                         writemsg(prefix + "\n", noiselevel=-1)
10815                         for failed_pkg in self._failed_pkgs_all:
10816                                 writemsg("%s\t%s\n" % (prefix,
10817                                         colorize("INFORM", str(failed_pkg.pkg))),
10818                                         noiselevel=-1)
10819                         writemsg(prefix + "\n", noiselevel=-1)
10820
10821                 return rval
10822
10823         def _elog_listener(self, mysettings, key, logentries, fulltext):
10824                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10825                 if errors:
10826                         self._failed_pkgs_die_msgs.append(
10827                                 (mysettings, key, errors))
10828
10829         def _locate_failure_log(self, failed_pkg):
10830
10831                 build_dir = failed_pkg.build_dir
10832                 log_file = None
10833
10834                 log_paths = [failed_pkg.build_log]
10835
10836                 for log_path in log_paths:
10837                         if not log_path:
10838                                 continue
10839
10840                         try:
10841                                 log_size = os.stat(log_path).st_size
10842                         except OSError:
10843                                 continue
10844
10845                         if log_size == 0:
10846                                 continue
10847
10848                         return log_path
10849
10850                 return None
10851
10852         def _add_packages(self):
10853                 pkg_queue = self._pkg_queue
10854                 for pkg in self._mergelist:
10855                         if isinstance(pkg, Package):
10856                                 pkg_queue.append(pkg)
10857                         elif isinstance(pkg, Blocker):
10858                                 pass
10859
10860         def _system_merge_started(self, merge):
10861                 """
10862                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10863                 """
10864                 graph = self._digraph
10865                 if graph is None:
10866                         return
10867                 pkg = merge.merge.pkg
10868
10869                 # Skip this if $ROOT != / since it shouldn't matter if there
10870                 # are unsatisfied system runtime deps in this case.
10871                 if pkg.root != '/':
10872                         return
10873
10874                 completed_tasks = self._completed_tasks
10875                 unsatisfied = self._unsatisfied_system_deps
10876
10877                 def ignore_non_runtime_or_satisfied(priority):
10878                         """
10879                         Ignore non-runtime and satisfied runtime priorities.
10880                         """
10881                         if isinstance(priority, DepPriority) and \
10882                                 not priority.satisfied and \
10883                                 (priority.runtime or priority.runtime_post):
10884                                 return False
10885                         return True
10886
10887                 # When checking for unsatisfied runtime deps, only check
10888                 # direct deps since indirect deps are checked when the
10889                 # corresponding parent is merged.
10890                 for child in graph.child_nodes(pkg,
10891                         ignore_priority=ignore_non_runtime_or_satisfied):
10892                         if not isinstance(child, Package) or \
10893                                 child.operation == 'uninstall':
10894                                 continue
10895                         if child is pkg:
10896                                 continue
10897                         if child.operation == 'merge' and \
10898                                 child not in completed_tasks:
10899                                 unsatisfied.add(child)
10900
10901         def _merge_wait_exit_handler(self, task):
10902                 self._merge_wait_scheduled.remove(task)
10903                 self._merge_exit(task)
10904
10905         def _merge_exit(self, merge):
10906                 self._do_merge_exit(merge)
10907                 self._deallocate_config(merge.merge.settings)
10908                 if merge.returncode == os.EX_OK and \
10909                         not merge.merge.pkg.installed:
10910                         self._status_display.curval += 1
10911                 self._status_display.merges = len(self._task_queues.merge)
10912                 self._schedule()
10913
10914         def _do_merge_exit(self, merge):
10915                 pkg = merge.merge.pkg
10916                 if merge.returncode != os.EX_OK:
10917                         settings = merge.merge.settings
10918                         build_dir = settings.get("PORTAGE_BUILDDIR")
10919                         build_log = settings.get("PORTAGE_LOG_FILE")
10920
10921                         self._failed_pkgs.append(self._failed_pkg(
10922                                 build_dir=build_dir, build_log=build_log,
10923                                 pkg=pkg,
10924                                 returncode=merge.returncode))
10925                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10926
10927                         self._status_display.failed = len(self._failed_pkgs)
10928                         return
10929
10930                 self._task_complete(pkg)
10931                 pkg_to_replace = merge.merge.pkg_to_replace
10932                 if pkg_to_replace is not None:
10933                         # When a package is replaced, mark its uninstall
10934                         # task complete (if any).
10935                         uninst_hash_key = \
10936                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10937                         self._task_complete(uninst_hash_key)
10938
10939                 if pkg.installed:
10940                         return
10941
10942                 self._restart_if_necessary(pkg)
10943
10944                 # Call mtimedb.commit() after each merge so that
10945                 # --resume still works after being interrupted
10946                 # by reboot, sigkill or similar.
10947                 mtimedb = self._mtimedb
10948                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10949                 if not mtimedb["resume"]["mergelist"]:
10950                         del mtimedb["resume"]
10951                 mtimedb.commit()
10952
10953         def _build_exit(self, build):
10954                 if build.returncode == os.EX_OK:
10955                         self.curval += 1
10956                         merge = PackageMerge(merge=build)
10957                         if not build.build_opts.buildpkgonly and \
10958                                 build.pkg in self._deep_system_deps:
10959                                 # Since dependencies on system packages are frequently
10960                                 # unspecified, merge them only when no builds are executing.
10961                                 self._merge_wait_queue.append(merge)
10962                                 merge.addStartListener(self._system_merge_started)
10963                         else:
10964                                 merge.addExitListener(self._merge_exit)
10965                                 self._task_queues.merge.add(merge)
10966                                 self._status_display.merges = len(self._task_queues.merge)
10967                 else:
10968                         settings = build.settings
10969                         build_dir = settings.get("PORTAGE_BUILDDIR")
10970                         build_log = settings.get("PORTAGE_LOG_FILE")
10971
10972                         self._failed_pkgs.append(self._failed_pkg(
10973                                 build_dir=build_dir, build_log=build_log,
10974                                 pkg=build.pkg,
10975                                 returncode=build.returncode))
10976                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10977
10978                         self._status_display.failed = len(self._failed_pkgs)
10979                         self._deallocate_config(build.settings)
10980                 self._jobs -= 1
10981                 self._status_display.running = self._jobs
10982                 self._schedule()
10983
10984         def _extract_exit(self, build):
10985                 self._build_exit(build)
10986
10987         def _task_complete(self, pkg):
10988                 self._completed_tasks.add(pkg)
10989                 self._unsatisfied_system_deps.discard(pkg)
10990                 self._choose_pkg_return_early = False
10991
10992         def _merge(self):
10993
10994                 self._add_prefetchers()
10995                 self._add_packages()
10996                 pkg_queue = self._pkg_queue
10997                 failed_pkgs = self._failed_pkgs
10998                 portage.locks._quiet = self._background
10999                 portage.elog._emerge_elog_listener = self._elog_listener
11000                 rval = os.EX_OK
11001
11002                 try:
11003                         self._main_loop()
11004                 finally:
11005                         self._main_loop_cleanup()
11006                         portage.locks._quiet = False
11007                         portage.elog._emerge_elog_listener = None
11008                         if failed_pkgs:
11009                                 rval = failed_pkgs[-1].returncode
11010
11011                 return rval
11012
11013         def _main_loop_cleanup(self):
11014                 del self._pkg_queue[:]
11015                 self._completed_tasks.clear()
11016                 self._deep_system_deps.clear()
11017                 self._unsatisfied_system_deps.clear()
11018                 self._choose_pkg_return_early = False
11019                 self._status_display.reset()
11020                 self._digraph = None
11021                 self._task_queues.fetch.clear()
11022
11023         def _choose_pkg(self):
11024                 """
11025                 Choose a task that has all its dependencies satisfied.
11026                 """
11027
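                      # With a digraph available and jobs running, walk the queue
                      # in order and pick the first package whose deep dependencies
                      # contain no scheduled merges; packages queued after the
                      # candidate ("later") are ignored, since delaying for them
                      # cannot improve the merge order.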
11028                 if self._choose_pkg_return_early:
11029                         return None
11030
11031                 if self._digraph is None:
11032                         if (self._jobs or self._task_queues.merge) and \
11033                                 not ("--nodeps" in self.myopts and \
11034                                 (self._max_jobs is True or self._max_jobs > 1)):
11035                                 self._choose_pkg_return_early = True
11036                                 return None
11037                         return self._pkg_queue.pop(0)
11038
11039                 if not (self._jobs or self._task_queues.merge):
11040                         return self._pkg_queue.pop(0)
11041
11042                 self._prune_digraph()
11043
11044                 chosen_pkg = None
11045                 later = set(self._pkg_queue)
11046                 for pkg in self._pkg_queue:
11047                         later.remove(pkg)
11048                         if not self._dependent_on_scheduled_merges(pkg, later):
11049                                 chosen_pkg = pkg
11050                                 break
11051
11052                 if chosen_pkg is not None:
11053                         self._pkg_queue.remove(chosen_pkg)
11054
11055                 if chosen_pkg is None:
11056                         # There's no point in searching for a package to
11057                         # choose until at least one of the existing jobs
11058                         # completes.
11059                         self._choose_pkg_return_early = True
11060
11061                 return chosen_pkg
11062
11063         def _dependent_on_scheduled_merges(self, pkg, later):
11064                 """
11065                 Traverse the subgraph of the given package's deep dependencies
11066                 to see if it contains any scheduled merges.
11067                 @param pkg: a package to check dependencies for
11068                 @type pkg: Package
11069                 @param later: packages for which dependence should be ignored
11070                         since they will be merged later than pkg anyway and therefore
11071                         delaying the merge of pkg will not result in a more optimal
11072                         merge order
11073                 @type later: set
11074                 @rtype: bool
11075                 @returns: True if the package is dependent, False otherwise.
11076                 """
11077
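                      # Depth-first walk over child nodes; installed nomerge nodes,
                      # uninstalls that are not direct deps, completed tasks, and
                      # packages in 'later' are treated as already satisfied.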
11078                 graph = self._digraph
11079                 completed_tasks = self._completed_tasks
11080
11081                 dependent = False
11082                 traversed_nodes = set([pkg])
11083                 direct_deps = graph.child_nodes(pkg)
11084                 node_stack = direct_deps
11085                 direct_deps = frozenset(direct_deps)
11086                 while node_stack:
11087                         node = node_stack.pop()
11088                         if node in traversed_nodes:
11089                                 continue
11090                         traversed_nodes.add(node)
11091                         if not ((node.installed and node.operation == "nomerge") or \
11092                                 (node.operation == "uninstall" and \
11093                                 node not in direct_deps) or \
11094                                 node in completed_tasks or \
11095                                 node in later):
11096                                 dependent = True
11097                                 break
11098                         node_stack.extend(graph.child_nodes(node))
11099
11100                 return dependent
11101
11102         def _allocate_config(self, root):
11103                 """
11104                 Allocate a unique config instance for a task in order
11105                 to prevent interference between parallel tasks.
11106                 """
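                      # Configs are pooled per ROOT: _deallocate_config() returns
                      # them to self._config_pool, so cloning pkgsettings happens
                      # at most once per concurrently running task.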
11107                 if self._config_pool[root]:
11108                         temp_settings = self._config_pool[root].pop()
11109                 else:
11110                         temp_settings = portage.config(clone=self.pkgsettings[root])
11111                 # Since config.setcpv() isn't guaranteed to call config.reset() (for
11112                 # performance reasons), call it here to make sure all settings from the
11113                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11114                 temp_settings.reload()
11115                 temp_settings.reset()
11116                 return temp_settings
11117
11118         def _deallocate_config(self, settings):
11119                 self._config_pool[settings["ROOT"]].append(settings)
11120
11121         def _main_loop(self):
11122
11123                 # Only allow 1 job max if a restart is scheduled
11124                 # due to portage update.
11125                 if self._is_restart_scheduled() or \
11126                         self._opts_no_background.intersection(self.myopts):
11127                         self._set_max_jobs(1)
11128
11129                 merge_queue = self._task_queues.merge
11130
11131                 while self._schedule():
11132                         if self._poll_event_handlers:
11133                                 self._poll_loop()
11134
11135                 while True:
11136                         self._schedule()
11137                         if not (self._jobs or merge_queue):
11138                                 break
11139                         if self._poll_event_handlers:
11140                                 self._poll_loop()
11141
11142         def _keep_scheduling(self):
11143                 return bool(self._pkg_queue and \
11144                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11145
11146         def _schedule_tasks(self):
11147
11148                 # When the number of jobs drops to zero, process all waiting merges.
11149                 if not self._jobs and self._merge_wait_queue:
11150                         for task in self._merge_wait_queue:
11151                                 task.addExitListener(self._merge_wait_exit_handler)
11152                                 self._task_queues.merge.add(task)
11153                         self._status_display.merges = len(self._task_queues.merge)
11154                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11155                         del self._merge_wait_queue[:]
11156
11157                 self._schedule_tasks_imp()
11158                 self._status_display.display()
11159
11160                 state_change = 0
11161                 for q in self._task_queues.values():
11162                         if q.schedule():
11163                                 state_change += 1
11164
11165                 # Cancel prefetchers if they're the only reason
11166                 # the main poll loop is still running.
11167                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11168                         not (self._jobs or self._task_queues.merge) and \
11169                         self._task_queues.fetch:
11170                         self._task_queues.fetch.clear()
11171                         state_change += 1
11172
11173                 if state_change:
11174                         self._schedule_tasks_imp()
11175                         self._status_display.display()
11176
11177                 return self._keep_scheduling()
11178
11179         def _job_delay(self):
11180                 """
11181                 @rtype: bool
11182                 @returns: True if job scheduling should be delayed, False otherwise.
11183                 """
11184
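                      # The delay between job starts grows with the number of
                      # running jobs:
                      #
                      #     delay = min(_job_delay_max,
                      #             _job_delay_factor * jobs ** _job_delay_exp)
                      #
                      # Illustrative values only: with factor=0.1, exp=2 and 4
                      # running jobs, the next job waits until at least 1.6
                      # seconds after the previous job started (assuming the
                      # _job_delay_max cap is not lower).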
11185                 if self._jobs and self._max_load is not None:
11186
11187                         current_time = time.time()
11188
11189                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11190                         if delay > self._job_delay_max:
11191                                 delay = self._job_delay_max
11192                         if (current_time - self._previous_job_start_time) < delay:
11193                                 return True
11194
11195                 return False
11196
11197         def _schedule_tasks_imp(self):
11198                 """
11199                 @rtype: bool
11200                 @returns: True if state changed, False otherwise.
11201                 """
11202
11203                 state_change = 0
11204
11205                 while True:
11206
11207                         if not self._keep_scheduling():
11208                                 return bool(state_change)
11209
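                              # Hold off when a previous pass found nothing ready,
                              # when wait-queue merges have been scheduled, when
                              # running jobs still have unsatisfied system deps, or
                              # when job/load limits or the job start delay apply.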
11210                         if self._choose_pkg_return_early or \
11211                                 self._merge_wait_scheduled or \
11212                                 (self._jobs and self._unsatisfied_system_deps) or \
11213                                 not self._can_add_job() or \
11214                                 self._job_delay():
11215                                 return bool(state_change)
11216
11217                         pkg = self._choose_pkg()
11218                         if pkg is None:
11219                                 return bool(state_change)
11220
11221                         state_change += 1
11222
11223                         if not pkg.installed:
11224                                 self._pkg_count.curval += 1
11225
11226                         task = self._task(pkg)
11227
11228                         if pkg.installed:
11229                                 merge = PackageMerge(merge=task)
11230                                 merge.addExitListener(self._merge_exit)
11231                                 self._task_queues.merge.add(merge)
11232
11233                         elif pkg.built:
11234                                 self._jobs += 1
11235                                 self._previous_job_start_time = time.time()
11236                                 self._status_display.running = self._jobs
11237                                 task.addExitListener(self._extract_exit)
11238                                 self._task_queues.jobs.add(task)
11239
11240                         else:
11241                                 self._jobs += 1
11242                                 self._previous_job_start_time = time.time()
11243                                 self._status_display.running = self._jobs
11244                                 task.addExitListener(self._build_exit)
11245                                 self._task_queues.jobs.add(task)
11246
11247                 return bool(state_change)
11248
11249         def _task(self, pkg):
11250
11251                 pkg_to_replace = None
11252                 if pkg.operation != "uninstall":
11253                         vardb = pkg.root_config.trees["vartree"].dbapi
11254                         previous_cpv = vardb.match(pkg.slot_atom)
11255                         if previous_cpv:
11256                                 previous_cpv = previous_cpv.pop()
11257                                 pkg_to_replace = self._pkg(previous_cpv,
11258                                         "installed", pkg.root_config, installed=True)
11259
11260                 task = MergeListItem(args_set=self._args_set,
11261                         background=self._background, binpkg_opts=self._binpkg_opts,
11262                         build_opts=self._build_opts,
11263                         config_pool=self._ConfigPool(pkg.root,
11264                         self._allocate_config, self._deallocate_config),
11265                         emerge_opts=self.myopts,
11266                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11267                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11268                         pkg_to_replace=pkg_to_replace,
11269                         prefetcher=self._prefetchers.get(pkg),
11270                         scheduler=self._sched_iface,
11271                         settings=self._allocate_config(pkg.root),
11272                         statusMessage=self._status_msg,
11273                         world_atom=self._world_atom)
11274
11275                 return task
11276
11277         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11278                 pkg = failed_pkg.pkg
11279                 msg = "%s to %s %s" % \
11280                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11281                 if pkg.root != "/":
11282                         msg += " %s %s" % (preposition, pkg.root)
11283
11284                 log_path = self._locate_failure_log(failed_pkg)
11285                 if log_path is not None:
11286                         msg += ", Log file:"
11287                 self._status_msg(msg)
11288
11289                 if log_path is not None:
11290                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11291
11292         def _status_msg(self, msg):
11293                 """
11294                 Display a brief status message (no newlines) in the status display.
11295                 This is called by tasks to provide feedback to the user. It
11296                 delegates the responsibility of generating \r and \n control
11297                 characters to the status display, which guarantees that lines are
11298                 created or erased when necessary and appropriate.
11299
11300                 @type msg: str
11301                 @param msg: a brief status message (no newlines allowed)
11302                 """
11303                 if not self._background:
11304                         writemsg_level("\n")
11305                 self._status_display.displayMessage(msg)
11306
11307         def _save_resume_list(self):
11308                 """
11309                 Do this before verifying the ebuild Manifests since it might
11310                 be possible for the user to use --resume --skipfirst to get past
11311                 a non-essential package with a broken digest.
11312                 """
11313                 mtimedb = self._mtimedb
11314                 mtimedb["resume"]["mergelist"] = [list(x) \
11315                         for x in self._mergelist \
11316                         if isinstance(x, Package) and x.operation == "merge"]
11317
11318                 mtimedb.commit()
11319
11320         def _calc_resume_list(self):
11321                 """
11322                 Use the current resume list to calculate a new one,
11323                 dropping any packages with unsatisfied deps.
11324                 @rtype: bool
11325                 @returns: True if successful, False otherwise.
11326                 """
11327                 print colorize("GOOD", "*** Resuming merge...")
11328
11329                 if self._show_list():
11330                         if "--tree" in self.myopts:
11331                                 portage.writemsg_stdout("\n" + \
11332                                         darkgreen("These are the packages that " + \
11333                                         "would be merged, in reverse order:\n\n"))
11334
11335                         else:
11336                                 portage.writemsg_stdout("\n" + \
11337                                         darkgreen("These are the packages that " + \
11338                                         "would be merged, in order:\n\n"))
11339
11340                 show_spinner = "--quiet" not in self.myopts and \
11341                         "--nodeps" not in self.myopts
11342
11343                 if show_spinner:
11344                         print "Calculating dependencies  ",
11345
11346                 myparams = create_depgraph_params(self.myopts, None)
11347                 success = False
11348                 e = None
11349                 try:
11350                         success, mydepgraph, dropped_tasks = resume_depgraph(
11351                                 self.settings, self.trees, self._mtimedb, self.myopts,
11352                                 myparams, self._spinner)
11353                 except depgraph.UnsatisfiedResumeDep, exc:
11354                         # rename variable to avoid python-3.0 error:
11355                         # SyntaxError: can not delete variable 'e' referenced in nested
11356                         #              scope
11357                         e = exc
11358                         mydepgraph = e.depgraph
11359                         dropped_tasks = set()
11360
11361                 if show_spinner:
11362                         print "\b\b... done!"
11363
11364                 if e is not None:
11365                         def unsatisfied_resume_dep_msg():
11366                                 mydepgraph.display_problems()
11367                                 out = portage.output.EOutput()
11368                                 out.eerror("One or more packages are either masked or " + \
11369                                         "have missing dependencies:")
11370                                 out.eerror("")
11371                                 indent = "  "
11372                                 show_parents = set()
11373                                 for dep in e.value:
11374                                         if dep.parent in show_parents:
11375                                                 continue
11376                                         show_parents.add(dep.parent)
11377                                         if dep.atom is None:
11378                                                 out.eerror(indent + "Masked package:")
11379                                                 out.eerror(2 * indent + str(dep.parent))
11380                                                 out.eerror("")
11381                                         else:
11382                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11383                                                 out.eerror(2 * indent + str(dep.parent))
11384                                                 out.eerror("")
11385                                 msg = "The resume list contains packages " + \
11386                                         "that are either masked or have " + \
11387                                         "unsatisfied dependencies. " + \
11388                                         "Please restart/continue " + \
11389                                         "the operation manually, or use --skipfirst " + \
11390                                         "to skip the first package in the list and " + \
11391                                         "any other packages that may be " + \
11392                                         "masked or have missing dependencies."
11393                                 for line in textwrap.wrap(msg, 72):
11394                                         out.eerror(line)
11395                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11396                         return False
11397
11398                 if success and self._show_list():
11399                         mylist = mydepgraph.altlist()
11400                         if mylist:
11401                                 if "--tree" in self.myopts:
11402                                         mylist.reverse()
11403                                 mydepgraph.display(mylist, favorites=self._favorites)
11404
11405                 if not success:
11406                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11407                         return False
11408                 mydepgraph.display_problems()
11409
11410                 mylist = mydepgraph.altlist()
11411                 mydepgraph.break_refs(mylist)
11412                 mydepgraph.break_refs(dropped_tasks)
11413                 self._mergelist = mylist
11414                 self._set_digraph(mydepgraph.schedulerGraph())
11415
11416                 msg_width = 75
11417                 for task in dropped_tasks:
11418                         if not (isinstance(task, Package) and task.operation == "merge"):
11419                                 continue
11420                         pkg = task
11421                         msg = "emerge --keep-going:" + \
11422                                 " %s" % (pkg.cpv,)
11423                         if pkg.root != "/":
11424                                 msg += " for %s" % (pkg.root,)
11425                         msg += " dropped due to unsatisfied dependency."
11426                         for line in textwrap.wrap(msg, msg_width):
11427                                 eerror(line, phase="other", key=pkg.cpv)
11428                         settings = self.pkgsettings[pkg.root]
11429                         # Ensure that log collection from $T is disabled inside
11430                         # elog_process(), since any logs that might exist are
11431                         # not valid here.
11432                         settings.pop("T", None)
11433                         portage.elog.elog_process(pkg.cpv, settings)
11434                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11435
11436                 return True
11437
11438         def _show_list(self):
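                      """
                      Return True if the merge list should be displayed, i.e. when
                      --ask, --tree, or --verbose is enabled and --quiet is not.
                      """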
11439                 myopts = self.myopts
11440                 if "--quiet" not in myopts and \
11441                         ("--ask" in myopts or "--tree" in myopts or \
11442                         "--verbose" in myopts):
11443                         return True
11444                 return False
11445
11446         def _world_atom(self, pkg):
11447                 """
11448                 Add the package to the world file, but only if
11449                 it's supposed to be added. Otherwise, do nothing.
11450                 """
11451
11452                 if set(("--buildpkgonly", "--fetchonly",
11453                         "--fetch-all-uri",
11454                         "--oneshot", "--onlydeps",
11455                         "--pretend")).intersection(self.myopts):
11456                         return
11457
11458                 if pkg.root != self.target_root:
11459                         return
11460
11461                 args_set = self._args_set
11462                 if not args_set.findAtomForPackage(pkg):
11463                         return
11464
11465                 logger = self._logger
11466                 pkg_count = self._pkg_count
11467                 root_config = pkg.root_config
11468                 world_set = root_config.sets["world"]
11469                 world_locked = False
11470                 if hasattr(world_set, "lock"):
11471                         world_set.lock()
11472                         world_locked = True
11473
11474                 try:
11475                         if hasattr(world_set, "load"):
11476                                 world_set.load() # maybe it's changed on disk
11477
11478                         atom = create_world_atom(pkg, args_set, root_config)
11479                         if atom:
11480                                 if hasattr(world_set, "add"):
11481                                         self._status_msg(('Recording %s in "world" ' + \
11482                                                 'favorites file...') % atom)
11483                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11484                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11485                                         world_set.add(atom)
11486                                 else:
11487                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11488                                                 (atom,), level=logging.WARN, noiselevel=-1)
11489                 finally:
11490                         if world_locked:
11491                                 world_set.unlock()
11492
11493         def _pkg(self, cpv, type_name, root_config, installed=False):
11494                 """
11495                 Get a package instance from the cache, or create a new
11496                 one if necessary. Raises KeyError from aux_get if it
11497                 fails for some reason (package does not exist or is
11498                 corrupt).
11499                 """
11500                 operation = "merge"
11501                 if installed:
11502                         operation = "nomerge"
11503
11504                 if self._digraph is not None:
11505                         # Reuse existing instance when available.
11506                         pkg = self._digraph.get(
11507                                 (type_name, root_config.root, cpv, operation))
11508                         if pkg is not None:
11509                                 return pkg
11510
11511                 tree_type = depgraph.pkg_tree_map[type_name]
11512                 db = root_config.trees[tree_type].dbapi
11513                 db_keys = list(self.trees[root_config.root][
11514                         tree_type].dbapi._aux_cache_keys)
11515                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11516                 pkg = Package(cpv=cpv, metadata=metadata,
11517                         root_config=root_config, installed=installed)
11518                 if type_name == "ebuild":
11519                         settings = self.pkgsettings[root_config.root]
11520                         settings.setcpv(pkg)
11521                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11522                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11523
11524                 return pkg
11525
11526 class MetadataRegen(PollScheduler):
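              """
              Regenerate the ebuild metadata cache by scheduling one metadata
              process per ebuild known to the portdbapi, running up to max_jobs
              of them concurrently. Stale cache entries belonging to ebuilds
              that no longer exist are discarded when run() completes.
              """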
11527
11528         def __init__(self, portdb, max_jobs=None, max_load=None):
11529                 PollScheduler.__init__(self)
11530                 self._portdb = portdb
11531
11532                 if max_jobs is None:
11533                         max_jobs = 1
11534
11535                 self._max_jobs = max_jobs
11536                 self._max_load = max_load
11537                 self._sched_iface = self._sched_iface_class(
11538                         register=self._register,
11539                         schedule=self._schedule_wait,
11540                         unregister=self._unregister)
11541
11542                 self._valid_pkgs = set()
11543                 self._process_iter = self._iter_metadata_processes()
11544                 self.returncode = os.EX_OK
11545                 self._error_count = 0
11546
11547         def _iter_metadata_processes(self):
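                      """
                      Yield a metadata process for every ebuild in the tree,
                      walking categories in sorted order and recording each
                      visited cpv in self._valid_pkgs.
                      """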
11548                 portdb = self._portdb
11549                 valid_pkgs = self._valid_pkgs
11550                 every_cp = portdb.cp_all()
11551                 every_cp.sort(reverse=True)
11552
11553                 while every_cp:
11554                         cp = every_cp.pop()
11555                         portage.writemsg_stdout("Processing %s\n" % cp)
11556                         cpv_list = portdb.cp_list(cp)
11557                         for cpv in cpv_list:
11558                                 valid_pkgs.add(cpv)
11559                                 ebuild_path, repo_path = portdb.findname2(cpv)
11560                                 metadata_process = portdb._metadata_process(
11561                                         cpv, ebuild_path, repo_path)
11562                                 if metadata_process is None:
11563                                         continue
11564                                 yield metadata_process
11565
11566         def run(self):
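                      """
                      Snapshot the existing cache keys for each porttree, run all
                      scheduled metadata jobs, and then prune cache entries that
                      belong to ebuilds which no longer exist.
                      """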
11567
11568                 portdb = self._portdb
11569                 from portage.cache.cache_errors import CacheError
11570                 dead_nodes = {}
11571
11572                 for mytree in portdb.porttrees:
11573                         try:
11574                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11575                         except CacheError, e:
11576                                 portage.writemsg("Error listing cache entries for " + \
11577                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11578                                 del e
11579                                 dead_nodes = None
11580                                 break
11581
11582                 while self._schedule():
11583                         self._poll_loop()
11584
11585                 while self._jobs:
11586                         self._poll_loop()
11587
11588                 if dead_nodes:
11589                         for y in self._valid_pkgs:
11590                                 for mytree in portdb.porttrees:
11591                                         if portdb.findname2(y, mytree=mytree)[0]:
11592                                                 dead_nodes[mytree].discard(y)
11593
11594                         for mytree, nodes in dead_nodes.iteritems():
11595                                 auxdb = portdb.auxdb[mytree]
11596                                 for y in nodes:
11597                                         try:
11598                                                 del auxdb[y]
11599                                         except (KeyError, CacheError):
11600                                                 pass
11601
11602         def _schedule_tasks(self):
11603                 """
11604                 @rtype: bool
11605                 @returns: True if there may be remaining tasks to schedule,
11606                         False otherwise.
11607                 """
11608                 while self._can_add_job():
11609                         try:
11610                                 metadata_process = self._process_iter.next()
11611                         except StopIteration:
11612                                 return False
11613
11614                         self._jobs += 1
11615                         metadata_process.scheduler = self._sched_iface
11616                         metadata_process.addExitListener(self._metadata_exit)
11617                         metadata_process.start()
11618                 return True
11619
11620         def _metadata_exit(self, metadata_process):
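                      """
                      Exit listener for metadata processes: record failures and
                      keep the job queue full by scheduling more work.
                      """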
11621                 self._jobs -= 1
11622                 if metadata_process.returncode != os.EX_OK:
11623                         self.returncode = 1
11624                         self._error_count += 1
11625                         self._valid_pkgs.discard(metadata_process.cpv)
11626                         portage.writemsg("Error processing %s, continuing...\n" % \
11627                                 (metadata_process.cpv,))
11628                 self._schedule()
11629
11630 class UninstallFailure(portage.exception.PortageException):
11631         """
11632         An instance of this class is raised by unmerge() when
11633         an uninstallation fails.
11634         """
11635         status = 1
11636         def __init__(self, *pargs):
11637                 portage.exception.PortageException.__init__(self, pargs)
11638                 if pargs:
11639                         self.status = pargs[0]
11640
11641 def unmerge(root_config, myopts, unmerge_action,
11642         unmerge_files, ldpath_mtimes, autoclean=0,
11643         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11644         scheduler=None, writemsg_level=portage.util.writemsg_level):
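              """
              Resolve the given files/atoms against the vartree, display the
              resulting selected/protected/omitted packages, and unmerge the
              selected ones after the usual CLEAN_DELAY countdown (or --ask
              prompt). Returns 1 after unmerging and 0 when nothing is
              unmerged (no matches, --pretend, or the user declines). When a
              package fails to unmerge, UninstallFailure is raised if
              raise_on_error is set; otherwise the process exits.

              A typical call for a single atom might look like this
              (hypothetical values):

                      unmerge(root_config, myopts, "unmerge",
                              ["app-misc/foo"], ldpath_mtimes)
              """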
11645
11646         quiet = "--quiet" in myopts
11647         settings = root_config.settings
11648         sets = root_config.sets
11649         vartree = root_config.trees["vartree"]
11650         candidate_catpkgs=[]
11651         global_unmerge=0
11652         xterm_titles = "notitles" not in settings.features
11653         out = portage.output.EOutput()
11654         pkg_cache = {}
11655         db_keys = list(vartree.dbapi._aux_cache_keys)
11656
11657         def _pkg(cpv):
11658                 pkg = pkg_cache.get(cpv)
11659                 if pkg is None:
11660                         pkg = Package(cpv=cpv, installed=True,
11661                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11662                                 root_config=root_config,
11663                                 type_name="installed")
11664                         pkg_cache[cpv] = pkg
11665                 return pkg
11666
11667         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11668         try:
11669                 # At least the parent needs to exist for the lock file.
11670                 portage.util.ensure_dirs(vdb_path)
11671         except portage.exception.PortageException:
11672                 pass
11673         vdb_lock = None
11674         try:
11675                 if os.access(vdb_path, os.W_OK):
11676                         vdb_lock = portage.locks.lockdir(vdb_path)
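                      # Expand the system set into plain package keys. For a virtual,
                      # substitute the installed provider, but only when exactly one
                      # provider is installed; otherwise the virtual is left out of
                      # syslist entirely.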
11677                 realsyslist = sets["system"].getAtoms()
11678                 syslist = []
11679                 for x in realsyslist:
11680                         mycp = portage.dep_getkey(x)
11681                         if mycp in settings.getvirtuals():
11682                                 providers = []
11683                                 for provider in settings.getvirtuals()[mycp]:
11684                                         if vartree.dbapi.match(provider):
11685                                                 providers.append(provider)
11686                                 if len(providers) == 1:
11687                                         syslist.extend(providers)
11688                         else:
11689                                 syslist.append(mycp)
11690         
11691                 mysettings = portage.config(clone=settings)
11692         
11693                 if not unmerge_files:
11694                         if unmerge_action == "unmerge":
11695                                 print
11696                                 print bold("emerge unmerge") + " can only be used with specific package names"
11697                                 print
11698                                 return 0
11699                         else:
11700                                 global_unmerge = 1
11701         
11702                 localtree = vartree
11703                 # process all arguments and add all
11704                 # valid db entries to candidate_catpkgs
11705                 if global_unmerge:
11706                         if not unmerge_files:
11707                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11708                 else:
11709                         #we've got command-line arguments
11710                         if not unmerge_files:
11711                                 print "\nNo packages to unmerge have been provided.\n"
11712                                 return 0
11713                         for x in unmerge_files:
11714                                 arg_parts = x.split('/')
11715                                 if x[0] not in [".","/"] and \
11716                                         arg_parts[-1][-7:] != ".ebuild":
11717                                         #possible cat/pkg or dep; treat as such
11718                                         candidate_catpkgs.append(x)
11719                                 elif unmerge_action in ["prune","clean"]:
11720                                         print "\n!!! Prune and clean do not accept individual" + \
11721                                                 " ebuilds as arguments;\n    skipping.\n"
11722                                         continue
11723                                 else:
11724                                         # it appears that the user is specifying an installed
11725                                         # ebuild and we're in "unmerge" mode, so it's ok.
11726                                         if not os.path.exists(x):
11727                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11728                                                 return 0
11729         
11730                                         absx   = os.path.abspath(x)
11731                                         sp_absx = absx.split("/")
11732                                         if sp_absx[-1][-7:] == ".ebuild":
11733                                                 del sp_absx[-1]
11734                                                 absx = "/".join(sp_absx)
11735         
11736                                         sp_absx_len = len(sp_absx)
11737         
11738                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11739                                         vdb_len  = len(vdb_path)
11740         
11741                                         sp_vdb     = vdb_path.split("/")
11742                                         sp_vdb_len = len(sp_vdb)
11743         
11744                                         if not os.path.exists(absx+"/CONTENTS"):
11745                                                 print "!!! Not a valid db dir: "+str(absx)
11746                                                 return 0
11747         
11748                                         if sp_absx_len <= sp_vdb_len:
11749                                                 # The path is shorter... so it can't be inside the vdb.
11750                                                 print sp_absx
11751                                                 print absx
11752                                                 print "\n!!!",x,"cannot be inside "+ \
11753                                                         vdb_path+"; aborting.\n"
11754                                                 return 0
11755         
11756                                         for idx in range(0,sp_vdb_len):
11757                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11758                                                         print sp_absx
11759                                                         print absx
11760                                                         print "\n!!!", x, "is not inside "+\
11761                                                                 vdb_path+"; aborting.\n"
11762                                                         return 0
11763         
11764                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11765                                         candidate_catpkgs.append(
11766                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11767         
11768                 newline=""
11769                 if (not "--quiet" in myopts):
11770                         newline="\n"
11771                 if settings["ROOT"] != "/":
11772                         writemsg_level(darkgreen(newline+ \
11773                                 ">>> Using system located in ROOT tree %s\n" % \
11774                                 settings["ROOT"]))
11775
11776                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11777                         not ("--quiet" in myopts):
11778                         writemsg_level(darkgreen(newline+\
11779                                 ">>> These are the packages that would be unmerged:\n"))
11780
11781                 # Preservation of order is required for --depclean and --prune so
11782                 # that dependencies are respected. Use all_selected to eliminate
11783                 # duplicate packages since the same package may be selected by
11784                 # multiple atoms.
11785                 pkgmap = []
11786                 all_selected = set()
11787                 for x in candidate_catpkgs:
11788                         # cycle through all our candidate deps and determine
11789                         # what will and will not get unmerged
11790                         try:
11791                                 mymatch = vartree.dbapi.match(x)
11792                         except portage.exception.AmbiguousPackageName, errpkgs:
11793                                 print "\n\n!!! The short ebuild name \"" + \
11794                                         x + "\" is ambiguous.  Please specify"
11795                                 print "!!! one of the following fully-qualified " + \
11796                                         "ebuild names instead:\n"
11797                                 for i in errpkgs[0]:
11798                                         print "    " + green(i)
11799                                 print
11800                                 sys.exit(1)
11801         
11802                         if not mymatch and x[0] not in "<>=~":
11803                                 mymatch = localtree.dep_match(x)
11804                         if not mymatch:
11805                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11806                                         (x, unmerge_action), noiselevel=-1)
11807                                 continue
11808
11809                         pkgmap.append(
11810                                 {"protected": set(), "selected": set(), "omitted": set()})
11811                         mykey = len(pkgmap) - 1
11812                         if unmerge_action == "unmerge":
11813                                 for y in mymatch:
11814                                         if y not in all_selected:
11815                                                 pkgmap[mykey]["selected"].add(y)
11816                                                 all_selected.add(y)
11817                         elif unmerge_action == "prune":
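                                      # When only one version is installed there is nothing to
                                      # prune; otherwise protect the best installed version
                                      # (preferring the highest counter within a slot) and
                                      # select every other version of this package for removal.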
11818                                 if len(mymatch) == 1:
11819                                         continue
11820                                 best_version = mymatch[0]
11821                                 best_slot = vartree.getslot(best_version)
11822                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11823                                 for mypkg in mymatch[1:]:
11824                                         myslot = vartree.getslot(mypkg)
11825                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11826                                         if (myslot == best_slot and mycounter > best_counter) or \
11827                                                 mypkg == portage.best([mypkg, best_version]):
11828                                                 if myslot == best_slot:
11829                                                         if mycounter < best_counter:
11830                                                                 # On slot collision, keep the one with the
11831                                                                 # highest counter since it is the most
11832                                                                 # recently installed.
11833                                                                 continue
11834                                                 best_version = mypkg
11835                                                 best_slot = myslot
11836                                                 best_counter = mycounter
11837                                 pkgmap[mykey]["protected"].add(best_version)
11838                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11839                                         if mypkg != best_version and mypkg not in all_selected)
11840                                 all_selected.update(pkgmap[mykey]["selected"])
11841                         else:
11842                                 # unmerge_action == "clean"
11843                                 slotmap={}
11844                                 for mypkg in mymatch:
11845                                         if unmerge_action == "clean":
11846                                                 myslot = localtree.getslot(mypkg)
11847                                         else:
11848                                                 # since we're pruning, we don't care about slots
11849                                                 # and put all the pkgs in together
11850                                                 myslot = 0
11851                                         if myslot not in slotmap:
11852                                                 slotmap[myslot] = {}
11853                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11854
11855                                 for mypkg in vartree.dbapi.cp_list(
11856                                         portage.dep_getkey(mymatch[0])):
11857                                         myslot = vartree.getslot(mypkg)
11858                                         if myslot not in slotmap:
11859                                                 slotmap[myslot] = {}
11860                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11861
11862                                 for myslot in slotmap:
11863                                         counterkeys = slotmap[myslot].keys()
11864                                         if not counterkeys:
11865                                                 continue
11866                                         counterkeys.sort()
11867                                         pkgmap[mykey]["protected"].add(
11868                                                 slotmap[myslot][counterkeys[-1]])
11869                                         del counterkeys[-1]
11870
11871                                         for counter in counterkeys[:]:
11872                                                 mypkg = slotmap[myslot][counter]
11873                                                 if mypkg not in mymatch:
11874                                                         counterkeys.remove(counter)
11875                                                         pkgmap[mykey]["protected"].add(
11876                                                                 slotmap[myslot][counter])
11877
11878                                         #be pretty and get them in order of merge:
11879                                         for ckey in counterkeys:
11880                                                 mypkg = slotmap[myslot][ckey]
11881                                                 if mypkg not in all_selected:
11882                                                         pkgmap[mykey]["selected"].add(mypkg)
11883                                                         all_selected.add(mypkg)
11884                                         # ok, now the last-merged package
11885                                         # is protected, and the rest are selected
11886                 numselected = len(all_selected)
11887                 if global_unmerge and not numselected:
11888                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11889                         return 0
11890         
11891                 if not numselected:
11892                         portage.writemsg_stdout(
11893                                 "\n>>> No packages selected for removal by " + \
11894                                 unmerge_action + "\n")
11895                         return 0
11896         finally:
11897                 if vdb_lock:
11898                         vartree.dbapi.flush_cache()
11899                         portage.locks.unlockdir(vdb_lock)
11900         
11901         from portage.sets.base import EditablePackageSet
11902         
11903         # generate a list of package sets that are directly or indirectly listed in "world",
11904         # as there is no persistent list of "installed" sets
11905         installed_sets = ["world"]
11906         stop = False
11907         pos = 0
11908         while not stop:
11909                 stop = True
11910                 pos = len(installed_sets)
11911                 for s in installed_sets[pos - 1:]:
11912                         if s not in sets:
11913                                 continue
11914                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11915                         if candidates:
11916                                 stop = False
11917                                 installed_sets += candidates
11918         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11919         del stop, pos
11920
11921         # we don't want to unmerge packages that are still listed in user-editable
11922         # package sets reachable from "world", since they would simply be remerged on
11923         # the next update of "world" or of the relevant package sets.
11924         unknown_sets = set()
11925         for cp in xrange(len(pkgmap)):
11926                 for cpv in pkgmap[cp]["selected"].copy():
11927                         try:
11928                                 pkg = _pkg(cpv)
11929                         except KeyError:
11930                                 # It could have been uninstalled
11931                                 # by a concurrent process.
11932                                 continue
11933
11934                         if unmerge_action != "clean" and \
11935                                 root_config.root == "/" and \
11936                                 portage.match_from_list(
11937                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11938                                 msg = ("Not unmerging package %s since there is no valid " + \
11939                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11940                                 for line in textwrap.wrap(msg, 75):
11941                                         out.eerror(line)
11942                                 # adjust pkgmap so the display output is correct
11943                                 pkgmap[cp]["selected"].remove(cpv)
11944                                 all_selected.remove(cpv)
11945                                 pkgmap[cp]["protected"].add(cpv)
11946                                 continue
11947
11948                         parents = []
11949                         for s in installed_sets:
11950                                 # skip sets that the user requested to unmerge, and skip world 
11951                                 # unless we're unmerging a package set (as the package would be 
11952                                 # removed from "world" later on)
11953                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11954                                         continue
11955
11956                                 if s not in sets:
11957                                         if s in unknown_sets:
11958                                                 continue
11959                                         unknown_sets.add(s)
11960                                         out = portage.output.EOutput()
11961                                         out.eerror(("Unknown set '@%s' in " + \
11962                                                 "%svar/lib/portage/world_sets") % \
11963                                                 (s, root_config.root))
11964                                         continue
11965
11966                                 # only check instances of EditablePackageSet as other classes are generally used for
11967                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11968                                 # user can't do much about them anyway)
11969                                 if isinstance(sets[s], EditablePackageSet):
11970
11971                                         # This is derived from a snippet of code in the
11972                                         # depgraph._iter_atoms_for_pkg() method.
11973                                         for atom in sets[s].iterAtomsForPackage(pkg):
11974                                                 inst_matches = vartree.dbapi.match(atom)
11975                                                 inst_matches.reverse() # descending order
11976                                                 higher_slot = None
11977                                                 for inst_cpv in inst_matches:
11978                                                         try:
11979                                                                 inst_pkg = _pkg(inst_cpv)
11980                                                         except KeyError:
11981                                                                 # It could have been uninstalled
11982                                                                 # by a concurrent process.
11983                                                                 continue
11984
11985                                                         if inst_pkg.cp != atom.cp:
11986                                                                 continue
11987                                                         if pkg >= inst_pkg:
11988                                                                 # This is descending order, and we're not
11989                                                                 # interested in any versions <= pkg given.
11990                                                                 break
11991                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11992                                                                 higher_slot = inst_pkg
11993                                                                 break
11994                                                 if higher_slot is None:
11995                                                         parents.append(s)
11996                                                         break
11997                         if parents:
11998                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11999                                 #print colorize("WARN", "but still listed in the following package sets:")
12000                                 #print "    %s\n" % ", ".join(parents)
12001                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12002                                 print colorize("WARN", "still referenced by the following package sets:")
12003                                 print "    %s\n" % ", ".join(parents)
12004                                 # adjust pkgmap so the display output is correct
12005                                 pkgmap[cp]["selected"].remove(cpv)
12006                                 all_selected.remove(cpv)
12007                                 pkgmap[cp]["protected"].add(cpv)
12008         
12009         del installed_sets
12010
12011         numselected = len(all_selected)
12012         if not numselected:
12013                 writemsg_level(
12014                         "\n>>> No packages selected for removal by " + \
12015                         unmerge_action + "\n")
12016                 return 0
12017
12018         # Unmerge order only matters in some cases
12019         if not ordered:
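                      # Collapse the per-atom entries into a single entry per package
                      # key (cp) so that the summary below is grouped and sorted by
                      # package name.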
12020                 unordered = {}
12021                 for d in pkgmap:
12022                         selected = d["selected"]
12023                         if not selected:
12024                                 continue
12025                         cp = portage.cpv_getkey(iter(selected).next())
12026                         cp_dict = unordered.get(cp)
12027                         if cp_dict is None:
12028                                 cp_dict = {}
12029                                 unordered[cp] = cp_dict
12030                                 for k in d:
12031                                         cp_dict[k] = set()
12032                         for k, v in d.iteritems():
12033                                 cp_dict[k].update(v)
12034                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12035
12036         for x in xrange(len(pkgmap)):
12037                 selected = pkgmap[x]["selected"]
12038                 if not selected:
12039                         continue
12040                 for mytype, mylist in pkgmap[x].iteritems():
12041                         if mytype == "selected":
12042                                 continue
12043                         mylist.difference_update(all_selected)
12044                 cp = portage.cpv_getkey(iter(selected).next())
12045                 for y in localtree.dep_match(cp):
12046                         if y not in pkgmap[x]["omitted"] and \
12047                                 y not in pkgmap[x]["selected"] and \
12048                                 y not in pkgmap[x]["protected"] and \
12049                                 y not in all_selected:
12050                                 pkgmap[x]["omitted"].add(y)
12051                 if global_unmerge and not pkgmap[x]["selected"]:
12052                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12053                         continue
12054                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12055                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12056                                 "'%s' is part of your system profile.\n" % cp),
12057                                 level=logging.WARNING, noiselevel=-1)
12058                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12059                                 "be damaging to your system.\n\n"),
12060                                 level=logging.WARNING, noiselevel=-1)
12061                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12062                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12063                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12064                 if not quiet:
12065                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12066                 else:
12067                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12068                 for mytype in ["selected","protected","omitted"]:
12069                         if not quiet:
12070                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12071                         if pkgmap[x][mytype]:
12072                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12073                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12074                                 for pn, ver, rev in sorted_pkgs:
12075                                         if rev == "r0":
12076                                                 myversion = ver
12077                                         else:
12078                                                 myversion = ver + "-" + rev
12079                                         if mytype == "selected":
12080                                                 writemsg_level(
12081                                                         colorize("UNMERGE_WARN", myversion + " "),
12082                                                         noiselevel=-1)
12083                                         else:
12084                                                 writemsg_level(
12085                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12086                         else:
12087                                 writemsg_level("none ", noiselevel=-1)
12088                         if not quiet:
12089                                 writemsg_level("\n", noiselevel=-1)
12090                 if quiet:
12091                         writemsg_level("\n", noiselevel=-1)
12092
12093         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12094                 " packages are slated for removal.\n")
12095         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12096                         " and " + colorize("GOOD", "'omitted'") + \
12097                         " packages will not be removed.\n\n")
12098
12099         if "--pretend" in myopts:
12100                 #we're done... return
12101                 return 0
12102         if "--ask" in myopts:
12103                 if userquery("Would you like to unmerge these packages?")=="No":
12104                         # enter pretend mode for correct formatting of results
12105                         myopts["--pretend"] = True
12106                         print
12107                         print "Quitting."
12108                         print
12109                         return 0
12110         #the real unmerging begins, after a short delay....
12111         if clean_delay and not autoclean:
12112                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12113
12114         for x in xrange(len(pkgmap)):
12115                 for y in pkgmap[x]["selected"]:
12116                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12117                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12118                         mysplit = y.split("/")
12119                         #unmerge...
12120                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12121                                 mysettings, unmerge_action not in ["clean","prune"],
12122                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12123                                 scheduler=scheduler)
12124
12125                         if retval != os.EX_OK:
12126                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12127                                 if raise_on_error:
12128                                         raise UninstallFailure(retval)
12129                                 sys.exit(retval)
12130                         else:
12131                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12132                                         sets["world"].cleanPackage(vartree.dbapi, y)
12133                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12134         if clean_world and hasattr(sets["world"], "remove"):
12135                 for s in root_config.setconfig.active:
12136                         sets["world"].remove(SETPREFIX+s)
12137         return 1
12138
12139 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
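              """
              Regenerate the GNU info directory index for any info directories
              whose mtimes have changed since they were last processed. The old
              'dir' file is put back if no new index is generated, and recorded
              mtimes are updated so unchanged directories can be skipped next
              time. Does nothing if /usr/bin/install-info is not installed.
              """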
12140
12141         if os.path.exists("/usr/bin/install-info"):
12142                 out = portage.output.EOutput()
12143                 regen_infodirs=[]
12144                 for z in infodirs:
12145                         if z=='':
12146                                 continue
12147                         inforoot=normpath(root+z)
12148                         if os.path.isdir(inforoot):
12149                                 infomtime = long(os.stat(inforoot).st_mtime)
12150                                 if inforoot not in prev_mtimes or \
12151                                         prev_mtimes[inforoot] != infomtime:
12152                                                 regen_infodirs.append(inforoot)
12153
12154                 if not regen_infodirs:
12155                         portage.writemsg_stdout("\n")
12156                         out.einfo("GNU info directory index is up-to-date.")
12157                 else:
12158                         portage.writemsg_stdout("\n")
12159                         out.einfo("Regenerating GNU info directory index...")
12160
12161                         dir_extensions = ("", ".gz", ".bz2")
12162                         icount=0
12163                         badcount=0
12164                         errmsg = ""
12165                         for inforoot in regen_infodirs:
12166                                 if inforoot=='':
12167                                         continue
12168
12169                                 if not os.path.isdir(inforoot) or \
12170                                         not os.access(inforoot, os.W_OK):
12171                                         continue
12172
12173                                 file_list = os.listdir(inforoot)
12174                                 file_list.sort()
12175                                 dir_file = os.path.join(inforoot, "dir")
12176                                 moved_old_dir = False
12177                                 processed_count = 0
12178                                 for x in file_list:
12179                                         if x.startswith(".") or \
12180                                                 os.path.isdir(os.path.join(inforoot, x)):
12181                                                 continue
12182                                         if x.startswith("dir"):
12183                                                 skip = False
12184                                                 for ext in dir_extensions:
12185                                                         if x == "dir" + ext or \
12186                                                                 x == "dir" + ext + ".old":
12187                                                                 skip = True
12188                                                                 break
12189                                                 if skip:
12190                                                         continue
12191                                         if processed_count == 0:
12192                                                 for ext in dir_extensions:
12193                                                         try:
12194                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12195                                                                 moved_old_dir = True
12196                                                         except EnvironmentError, e:
12197                                                                 if e.errno != errno.ENOENT:
12198                                                                         raise
12199                                                                 del e
12200                                         processed_count += 1
12201                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12202                                         existsstr="already exists, for file `"
12203                                         if myso!="":
12204                                                 if re.search(existsstr,myso):
12205                                                         # Already exists... Don't increment the count for this.
12206                                                         pass
12207                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12208                                                         # This info file doesn't contain a DIR-header: install-info produces this
12209                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12210                                                         # Don't increment the count for this.
12211                                                         pass
12212                                                 else:
12213                                                         badcount=badcount+1
12214                                                         errmsg += myso + "\n"
12215                                         icount=icount+1
12216
12217                                 if moved_old_dir and not os.path.exists(dir_file):
12218                                         # We didn't generate a new dir file, so put the old file
12219                                         # back where it was originally found.
12220                                         for ext in dir_extensions:
12221                                                 try:
12222                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12223                                                 except EnvironmentError, e:
12224                                                         if e.errno != errno.ENOENT:
12225                                                                 raise
12226                                                         del e
12227
12228                                 # Clean up dir.old cruft so that it doesn't prevent
12229                                 # unmerge of otherwise empty directories.
12230                                 for ext in dir_extensions:
12231                                         try:
12232                                                 os.unlink(dir_file + ext + ".old")
12233                                         except EnvironmentError, e:
12234                                                 if e.errno != errno.ENOENT:
12235                                                         raise
12236                                                 del e
12237
12238                                 #update mtime so we can potentially avoid regenerating.
12239                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12240
12241                         if badcount:
12242                                 out.eerror("Processed %d info files; %d errors." % \
12243                                         (icount, badcount))
12244                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12245                         else:
12246                                 if icount > 0:
12247                                         out.einfo("Processed %d info files." % (icount,))
12248
12249
12250 def display_news_notification(root_config, myopts):
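              """
              Check each configured repository for unread news items and, when
              any are found, print a reminder to read them with 'eselect news'.
              The unread-item lists are only updated when --pretend is not in
              effect.
              """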
12251         target_root = root_config.root
12252         trees = root_config.trees
12253         settings = trees["vartree"].settings
12254         portdb = trees["porttree"].dbapi
12255         vardb = trees["vartree"].dbapi
12256         NEWS_PATH = os.path.join("metadata", "news")
12257         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12258         newsReaderDisplay = False
12259         update = "--pretend" not in myopts
12260
12261         for repo in portdb.getRepositories():
12262                 unreadItems = checkUpdatedNewsItems(
12263                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12264                 if unreadItems:
12265                         if not newsReaderDisplay:
12266                                 newsReaderDisplay = True
12267                                 print
12268                         print colorize("WARN", " * IMPORTANT:"),
12269                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12270                         
12271         
12272         if newsReaderDisplay:
12273                 print colorize("WARN", " *"),
12274                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12275                 print
12276
12277 def display_preserved_libs(vardbapi):
12278         MAX_DISPLAY = 3
12279
12280         # Ensure the registry is consistent with existing files.
12281         vardbapi.plib_registry.pruneNonExisting()
12282
12283         if vardbapi.plib_registry.hasEntries():
12284                 print
12285                 print colorize("WARN", "!!!") + " existing preserved libs:"
12286                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12287                 linkmap = vardbapi.linkmap
12288                 consumer_map = {}
12289                 owners = {}
12290                 linkmap_broken = False
12291
12292                 try:
12293                         linkmap.rebuild()
12294                 except portage.exception.CommandNotFound, e:
12295                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12296                                 level=logging.ERROR, noiselevel=-1)
12297                         del e
12298                         linkmap_broken = True
12299                 else:
12300                         search_for_owners = set()
12301                         for cpv in plibdata:
12302                                 internal_plib_keys = set(linkmap._obj_key(f) \
12303                                         for f in plibdata[cpv])
12304                                 for f in plibdata[cpv]:
12305                                         if f in consumer_map:
12306                                                 continue
12307                                         consumers = []
12308                                         for c in linkmap.findConsumers(f):
12309                                                 # Filter out any consumers that are also preserved libs
12310                                                 # belonging to the same package as the provider.
12311                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12312                                                         consumers.append(c)
12313                                         consumers.sort()
12314                                         consumer_map[f] = consumers
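                                              # Only look up owners for consumers that may actually be displayed:
                                              # at most MAX_DISPLAY, plus one extra for the "exactly one more"
                                              # case handled below.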
12315                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12316
12317                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12318
12319                 for cpv in plibdata:
12320                         print colorize("WARN", ">>>") + " package: %s" % cpv
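                              # Group alternate paths that resolve to the same underlying object
                              # (via linkmap._obj_key) so each preserved file is listed once with
                              # all of its paths.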
12321                         samefile_map = {}
12322                         for f in plibdata[cpv]:
12323                                 obj_key = linkmap._obj_key(f)
12324                                 alt_paths = samefile_map.get(obj_key)
12325                                 if alt_paths is None:
12326                                         alt_paths = set()
12327                                         samefile_map[obj_key] = alt_paths
12328                                 alt_paths.add(f)
12329
12330                         for alt_paths in samefile_map.itervalues():
12331                                 alt_paths = sorted(alt_paths)
12332                                 for p in alt_paths:
12333                                         print colorize("WARN", " * ") + " - %s" % (p,)
12334                                 f = alt_paths[0]
12335                                 consumers = consumer_map.get(f, [])
12336                                 for c in consumers[:MAX_DISPLAY]:
12337                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12338                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12339                                 if len(consumers) == MAX_DISPLAY + 1:
12340                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12341                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12342                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12343                                 elif len(consumers) > MAX_DISPLAY:
12344                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12345                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12346
12347
12348 def _flush_elog_mod_echo():
12349         """
12350         Dump the mod_echo output now so that our other
12351         notifications are shown last.
12352         @rtype: bool
12353         @returns: True if messages were shown, False otherwise.
12354         """
12355         messages_shown = False
12356         try:
12357                 from portage.elog import mod_echo
12358         except ImportError:
12359                 pass # happens during downgrade to a version without the module
12360         else:
12361                 messages_shown = bool(mod_echo._items)
12362                 mod_echo.finalize()
12363         return messages_shown
12364
12365 def post_emerge(root_config, myopts, mtimedb, retval):
12366         """
12367         Misc. things to run at the end of a merge session.
12368         
12369         Update Info Files
12370         Update Config Files
12371         Update News Items
12372         Commit mtimeDB
12373         Display preserved libs warnings
12374         Exit Emerge
12375
12376         @param root_config: The RootConfig for the target ROOT, which provides access to its package databases
12377         @type root_config: RootConfig
12378         @param mtimedb: The mtimeDB to store data needed across merge invocations
12379         @type mtimedb: MtimeDB class instance
12380         @param retval: Emerge's return value
12381         @type retval: Int
12382         @rtype: None
12383         @returns:
12384         1.  Calls sys.exit(retval)
12385         """
12386
12387         target_root = root_config.root
12388         trees = { target_root : root_config.trees }
12389         vardbapi = trees[target_root]["vartree"].dbapi
12390         settings = vardbapi.settings
12391         info_mtimes = mtimedb["info"]
12392
12393         # Load the most current variables from ${ROOT}/etc/profile.env
12394         settings.unlock()
12395         settings.reload()
12396         settings.regenerate()
12397         settings.lock()
12398
12399         config_protect = settings.get("CONFIG_PROTECT","").split()
12400         infodirs = settings.get("INFOPATH","").split(":") + \
12401                 settings.get("INFODIR","").split(":")
12402
12403         os.chdir("/")
12404
12405         if retval == os.EX_OK:
12406                 exit_msg = " *** exiting successfully."
12407         else:
12408                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12409         emergelog("notitles" not in settings.features, exit_msg)
12410
12411         _flush_elog_mod_echo()
12412
12413         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12414         if "--pretend" in myopts or (counter_hash is not None and \
12415                 counter_hash == vardbapi._counter_hash()):
12416                 display_news_notification(root_config, myopts)
12417                 # If vdb state has not changed then there's nothing else to do.
12418                 sys.exit(retval)
12419
12420         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12421         portage.util.ensure_dirs(vdb_path)
12422         vdb_lock = None
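              # Only lock the vdb and update info files / commit the mtimedb when
              # the vdb is actually writable and this is not a --pretend run.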
12423         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12424                 vdb_lock = portage.locks.lockdir(vdb_path)
12425
12426         if vdb_lock:
12427                 try:
12428                         if "noinfo" not in settings.features:
12429                                 chk_updated_info_files(target_root,
12430                                         infodirs, info_mtimes, retval)
12431                         mtimedb.commit()
12432                 finally:
12433                         if vdb_lock:
12434                                 portage.locks.unlockdir(vdb_lock)
12435
12436         chk_updated_cfg_files(target_root, config_protect)
12437         
12438         display_news_notification(root_config, myopts)
12439         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12440                 display_preserved_libs(vardbapi)        
12441
12442         sys.exit(retval)
12443
12444
12445 def chk_updated_cfg_files(target_root, config_protect):
12446         if config_protect:
12447                 #number of directories with some protect files in them
12448                 procount=0
12449                 for x in config_protect:
12450                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12451                         if not os.access(x, os.W_OK):
12452                                 # Avoid Permission denied errors generated
12453                                 # later by `find`.
12454                                 continue
12455                         try:
12456                                 mymode = os.lstat(x).st_mode
12457                         except OSError:
12458                                 continue
12459                         if stat.S_ISLNK(mymode):
12460                                 # We want to treat it like a directory if it
12461                                 # is a symlink to an existing directory.
12462                                 try:
12463                                         real_mode = os.stat(x).st_mode
12464                                         if stat.S_ISDIR(real_mode):
12465                                                 mymode = real_mode
12466                                 except OSError:
12467                                         pass
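                              # Pending CONFIG_PROTECT updates are staged by the merge process as
                              # ._cfg????_<name> files next to the protected file; look for any that
                              # still need to be merged (e.g. via etc-update or dispatch-conf).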
12468                         if stat.S_ISDIR(mymode):
12469                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12470                         else:
12471                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12472                                         os.path.split(x.rstrip(os.path.sep))
12473                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12474                         a = commands.getstatusoutput(mycommand)
12475                         if a[0] != 0:
12476                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12477                                 sys.stderr.flush()
12478                                 # Show the error message alone, sending stdout to /dev/null.
12479                                 os.system(mycommand + " 1>/dev/null")
12480                         else:
12481                                 files = a[1].split('\0')
12482                                 # split always produces an empty string as the last element
12483                                 if files and not files[-1]:
12484                                         del files[-1]
12485                                 if files:
12486                                         procount += 1
12487                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12488                                         if stat.S_ISDIR(mymode):
12489                                                  print "%d config files in '%s' need updating." % \
12490                                                         (len(files), x)
12491                                         else:
12492                                                  print "config file '%s' needs updating." % x
12493
12494                 if procount:
12495                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12496                                 " section of the " + bold("emerge")
12497                         print " "+yellow("*")+" man page to learn how to update config files."
12498
12499 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12500         update=False):
12501         """
12502         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12503         Returns the number of unread (yet relevant) items.
12504         
12505         @param portdb: a portage tree database
12506         @type portdb: portdbapi
12507         @param vardb: an installed package database
12508         @type vardb: vardbapi
12509         @param NEWS_PATH: path, relative to the repository, in which news items are stored
12510         @type NEWS_PATH: String
12511         @param UNREAD_PATH: path to the directory in which per-repository unread-item lists are kept
12512         @type UNREAD_PATH: String
12513         @param repo_id: name of the repository to check
12514         @type repo_id: String
12515         @rtype: Integer
12516         @returns:
12517         1.  The number of unread but relevant news items.
12518         
12519         """
12520         from portage.news import NewsManager
12521         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12522         return manager.getUnreadItems( repo_id, update=update )
12523
12524 def insert_category_into_atom(atom, category):
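              # Insert the category before the package name, preserving any operator prefix;
              # e.g. insert_category_into_atom(">=foo-1.2", "sys-apps") returns ">=sys-apps/foo-1.2".
              # Returns None if the atom contains no word characters.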
12525         alphanum = re.search(r'\w', atom)
12526         if alphanum:
12527                 ret = atom[:alphanum.start()] + "%s/" % category + \
12528                         atom[alphanum.start():]
12529         else:
12530                 ret = None
12531         return ret
12532
12533 def is_valid_package_atom(x):
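              # If no category is given, prepend a dummy "cat/" category so that
              # portage.isvalidatom() can validate the remainder of the atom.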
12534         if "/" not in x:
12535                 alphanum = re.search(r'\w', x)
12536                 if alphanum:
12537                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12538         return portage.isvalidatom(x)
12539
12540 def show_blocker_docs_link():
12541         print
12542         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12543         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12544         print
12545         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12546         print
12547
12548 def show_mask_docs():
12549         print "For more information, see the MASKED PACKAGES section in the emerge"
12550         print "man page or refer to the Gentoo Handbook."
12551
12552 def action_sync(settings, trees, mtimedb, myopts, myaction):
12553         xterm_titles = "notitles" not in settings.features
12554         emergelog(xterm_titles, " === sync")
12555         myportdir = settings.get("PORTDIR", None)
12556         out = portage.output.EOutput()
12557         if not myportdir:
12558                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12559                 sys.exit(1)
12560         if myportdir[-1]=="/":
12561                 myportdir=myportdir[:-1]
12562         try:
12563                 st = os.stat(myportdir)
12564         except OSError:
12565                 st = None
12566         if st is None:
12567                 print ">>>",myportdir,"not found, creating it."
12568                 os.makedirs(myportdir,0755)
12569                 st = os.stat(myportdir)
12570
12571         spawn_kwargs = {}
12572         spawn_kwargs["env"] = settings.environ()
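              # With FEATURES=usersync and root privileges (secpass >= 2), sync as the
              # owner of PORTDIR when the tree belongs to a different user or group that
              # has the corresponding access bits set, matching existing on-disk ownership.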
12573         if 'usersync' in settings.features and \
12574                 portage.data.secpass >= 2 and \
12575                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12576                 st.st_gid != os.getgid() and st.st_mode & 0070):
12577                 try:
12578                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12579                 except KeyError:
12580                         pass
12581                 else:
12582                         # Drop privileges when syncing, in order to match
12583                         # existing uid/gid settings.
12584                         spawn_kwargs["uid"]    = st.st_uid
12585                         spawn_kwargs["gid"]    = st.st_gid
12586                         spawn_kwargs["groups"] = [st.st_gid]
12587                         spawn_kwargs["env"]["HOME"] = homedir
12588                         umask = 0002
12589                         if not st.st_mode & 0020:
12590                                 umask = umask | 0020
12591                         spawn_kwargs["umask"] = umask
12592
12593         syncuri = settings.get("SYNC", "").strip()
12594         if not syncuri:
12595                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12596                         noiselevel=-1, level=logging.ERROR)
12597                 return 1
12598
12599         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12600         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12601
12602         os.umask(0022)
12603         dosyncuri = syncuri
12604         updatecache_flg = False
12605         if myaction == "metadata":
12606                 print "skipping sync"
12607                 updatecache_flg = True
12608         elif ".git" in vcs_dirs:
12609                 # Update existing git repository, and ignore the syncuri. We are
12610                 # going to trust the user and assume that the user is in the branch
12611                 # that he/she wants updated. We'll let the user manage branches with
12612                 # git directly.
12613                 if portage.process.find_binary("git") is None:
12614                         msg = ["Command not found: git",
12615                         "Type \"emerge dev-util/git\" to enable git support."]
12616                         for l in msg:
12617                                 writemsg_level("!!! %s\n" % l,
12618                                         level=logging.ERROR, noiselevel=-1)
12619                         return 1
12620                 msg = ">>> Starting git pull in %s..." % myportdir
12621                 emergelog(xterm_titles, msg )
12622                 writemsg_level(msg + "\n")
12623                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12624                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12625                 if exitcode != os.EX_OK:
12626                         msg = "!!! git pull error in %s." % myportdir
12627                         emergelog(xterm_titles, msg)
12628                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12629                         return exitcode
12630                 msg = ">>> Git pull in %s successful" % myportdir
12631                 emergelog(xterm_titles, msg)
12632                 writemsg_level(msg + "\n")
12633                 exitcode = git_sync_timestamps(settings, myportdir)
12634                 if exitcode == os.EX_OK:
12635                         updatecache_flg = True
12636         elif syncuri[:8]=="rsync://":
12637                 for vcs_dir in vcs_dirs:
12638                         writemsg_level(("!!! %s appears to be under revision " + \
12639                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12640                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12641                         return 1
12642                 if not os.path.exists("/usr/bin/rsync"):
12643                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12644                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12645                         sys.exit(1)
12646                 mytimeout=180
12647
12648                 rsync_opts = []
12649                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12650                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12651                         rsync_opts.extend([
12652                                 "--recursive",    # Recurse directories
12653                                 "--links",        # Consider symlinks
12654                                 "--safe-links",   # Ignore links outside of tree
12655                                 "--perms",        # Preserve permissions
12656                                 "--times",        # Preserve mod times
12657                                 "--compress",     # Compress the data transmitted
12658                                 "--force",        # Force deletion on non-empty dirs
12659                                 "--whole-file",   # Don't do block transfers, only entire files
12660                                 "--delete",       # Delete files that aren't in the master tree
12661                                 "--stats",        # Show final statistics about what was transferred
12662                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12663                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12664                                 "--exclude=/local",       # Exclude local     from consideration
12665                                 "--exclude=/packages",    # Exclude packages  from consideration
12666                         ])
12667
12668                 else:
12669                         # The below validation is not needed when using the above hardcoded
12670                         # defaults.
12671
12672                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12673                         rsync_opts.extend(
12674                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12675                         for opt in ("--recursive", "--times"):
12676                                 if opt not in rsync_opts:
12677                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12678                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12679                                         rsync_opts.append(opt)
12680         
12681                         for exclude in ("distfiles", "local", "packages"):
12682                                 opt = "--exclude=/%s" % exclude
12683                                 if opt not in rsync_opts:
12684                                         portage.writemsg(yellow("WARNING:") + \
12685                                         " adding required option %s not included in "  % opt + \
12686                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12687                                         rsync_opts.append(opt)
12688         
12689                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12690                                 def rsync_opt_startswith(opt_prefix):
12691                                         for x in rsync_opts:
12692                                                 if x.startswith(opt_prefix):
12693                                                         return True
12694                                         return False
12695
12696                                 if not rsync_opt_startswith("--timeout="):
12697                                         rsync_opts.append("--timeout=%d" % mytimeout)
12698
12699                                 for opt in ("--compress", "--whole-file"):
12700                                         if opt not in rsync_opts:
12701                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12702                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12703                                                 rsync_opts.append(opt)
12704
12705                 if "--quiet" in myopts:
12706                         rsync_opts.append("--quiet")    # Shut up a lot
12707                 else:
12708                         rsync_opts.append("--verbose")  # Print filelist
12709
12710                 if "--verbose" in myopts:
12711                         rsync_opts.append("--progress")  # Progress meter for each file
12712
12713                 if "--debug" in myopts:
12714                         rsync_opts.append("--checksum") # Force checksum on all files
12715
12716                 # Real local timestamp file.
12717                 servertimestampfile = os.path.join(
12718                         myportdir, "metadata", "timestamp.chk")
12719
12720                 content = portage.util.grabfile(servertimestampfile)
12721                 mytimestamp = 0
12722                 if content:
12723                         try:
12724                                 mytimestamp = time.mktime(time.strptime(content[0],
12725                                         "%a, %d %b %Y %H:%M:%S +0000"))
12726                         except (OverflowError, ValueError):
12727                                 pass
12728                 del content
12729
12730                 try:
12731                         rsync_initial_timeout = \
12732                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12733                 except ValueError:
12734                         rsync_initial_timeout = 15
12735
12736                 try:
12737                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12738                 except SystemExit, e:
12739                         raise # Needed else can't exit
12740                 except:
12741                         maxretries=3 #default number of retries
12742
12743                 retries=0
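                      # Split the rsync URI into optional "user@", hostname and optional ":port"
                      # components; groups that did not match come back as None and are
                      # normalized to "" below.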
12744                 user_name, hostname, port = re.split(
12745                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12746                 if port is None:
12747                         port=""
12748                 if user_name is None:
12749                         user_name=""
12750                 updatecache_flg=True
12751                 all_rsync_opts = set(rsync_opts)
12752                 extra_rsync_opts = shlex.split(
12753                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12754                 all_rsync_opts.update(extra_rsync_opts)
12755                 family = socket.AF_INET
12756                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12757                         family = socket.AF_INET
12758                 elif socket.has_ipv6 and \
12759                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12760                         family = socket.AF_INET6
12761                 ips=[]
12762                 SERVER_OUT_OF_DATE = -1
12763                 EXCEEDED_MAX_RETRIES = -2
12764                 while (1):
12765                         if ips:
12766                                 del ips[0]
12767                         if ips==[]:
12768                                 try:
12769                                         for addrinfo in socket.getaddrinfo(
12770                                                 hostname, None, family, socket.SOCK_STREAM):
12771                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12772                                                         # IPv6 addresses need to be enclosed in square brackets
12773                                                         ips.append("[%s]" % addrinfo[4][0])
12774                                                 else:
12775                                                         ips.append(addrinfo[4][0])
12776                                         from random import shuffle
12777                                         shuffle(ips)
12778                                 except SystemExit, e:
12779                                         raise # Needed else can't exit
12780                                 except Exception, e:
12781                                         print "Notice:",str(e)
12782                                         dosyncuri=syncuri
12783
12784                         if ips:
12785                                 try:
12786                                         dosyncuri = syncuri.replace(
12787                                                 "//" + user_name + hostname + port + "/",
12788                                                 "//" + user_name + ips[0] + port + "/", 1)
12789                                 except SystemExit, e:
12790                                         raise # Needed else can't exit
12791                                 except Exception, e:
12792                                         print "Notice:",str(e)
12793                                         dosyncuri=syncuri
12794
12795                         if (retries==0):
12796                                 if "--ask" in myopts:
12797                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12798                                                 print
12799                                                 print "Quitting."
12800                                                 print
12801                                                 sys.exit(0)
12802                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12803                                 if "--quiet" not in myopts:
12804                                         print ">>> Starting rsync with "+dosyncuri+"..."
12805                         else:
12806                                 emergelog(xterm_titles,
12807                                         ">>> Starting retry %d of %d with %s" % \
12808                                                 (retries,maxretries,dosyncuri))
12809                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12810
12811                         if mytimestamp != 0 and "--quiet" not in myopts:
12812                                 print ">>> Checking server timestamp ..."
12813
12814                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12815
12816                         if "--debug" in myopts:
12817                                 print rsynccommand
12818
12819                         exitcode = os.EX_OK
12820                         servertimestamp = 0
12821                         # Even if there's no timestamp available locally, fetch the
12822                         # timestamp anyway as an initial probe to verify that the server is
12823                         # responsive.  This protects us from hanging indefinitely on a
12824                         # connection attempt to an unresponsive server which rsync's
12825                         # --timeout option does not prevent.
12826                         if True:
12827                                 # Temporary file for remote server timestamp comparison.
12828                                 from tempfile import mkstemp
12829                                 fd, tmpservertimestampfile = mkstemp()
12830                                 os.close(fd)
12831                                 mycommand = rsynccommand[:]
12832                                 mycommand.append(dosyncuri.rstrip("/") + \
12833                                         "/metadata/timestamp.chk")
12834                                 mycommand.append(tmpservertimestampfile)
12835                                 content = None
12836                                 mypids = []
12837                                 try:
12838                                         def timeout_handler(signum, frame):
12839                                                 raise portage.exception.PortageException("timed out")
12840                                         signal.signal(signal.SIGALRM, timeout_handler)
12841                                         # Timeout here in case the server is unresponsive.  The
12842                                         # --timeout rsync option doesn't apply to the initial
12843                                         # connection attempt.
12844                                         if rsync_initial_timeout:
12845                                                 signal.alarm(rsync_initial_timeout)
12846                                         try:
12847                                                 mypids.extend(portage.process.spawn(
12848                                                         mycommand, env=settings.environ(), returnpid=True))
12849                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12850                                                 content = portage.grabfile(tmpservertimestampfile)
12851                                         finally:
12852                                                 if rsync_initial_timeout:
12853                                                         signal.alarm(0)
12854                                                 try:
12855                                                         os.unlink(tmpservertimestampfile)
12856                                                 except OSError:
12857                                                         pass
12858                                 except portage.exception.PortageException, e:
12859                                         # timed out
12860                                         print e
12861                                         del e
12862                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12863                                                 os.kill(mypids[0], signal.SIGTERM)
12864                                                 os.waitpid(mypids[0], 0)
12865                                         # This is the same code rsync uses for timeout.
12866                                         exitcode = 30
12867                                 else:
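                                              # os.waitpid() returned a 16-bit status: the low byte holds the
                                              # terminating signal (if any) and the high byte holds the exit code.
                                              # Reduce it to a single exit value for the checks below.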
12868                                         if exitcode != os.EX_OK:
12869                                                 if exitcode & 0xff:
12870                                                         exitcode = (exitcode & 0xff) << 8
12871                                                 else:
12872                                                         exitcode = exitcode >> 8
12873                                 if mypids:
12874                                         portage.process.spawned_pids.remove(mypids[0])
12875                                 if content:
12876                                         try:
12877                                                 servertimestamp = time.mktime(time.strptime(
12878                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12879                                         except (OverflowError, ValueError):
12880                                                 pass
12881                                 del mycommand, mypids, content
12882                         if exitcode == os.EX_OK:
12883                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12884                                         emergelog(xterm_titles,
12885                                                 ">>> Cancelling sync -- Already current.")
12886                                         print
12887                                         print ">>>"
12888                                         print ">>> Timestamps on the server and in the local repository are the same."
12889                                         print ">>> Cancelling all further sync action. You are already up to date."
12890                                         print ">>>"
12891                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12892                                         print ">>>"
12893                                         print
12894                                         sys.exit(0)
12895                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12896                                         emergelog(xterm_titles,
12897                                                 ">>> Server out of date: %s" % dosyncuri)
12898                                         print
12899                                         print ">>>"
12900                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12901                                         print ">>>"
12902                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12903                                         print ">>>"
12904                                         print
12905                                         exitcode = SERVER_OUT_OF_DATE
12906                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12907                                         # actual sync
12908                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12909                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
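                                                      # Stop retrying on success or on rsync errors that another attempt
                                                      # will not fix (syntax, file selection, file I/O, IPC, signals);
                                                      # other codes, e.g. network timeouts, fall through to the retry logic.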
12910                                         if exitcode in [0,1,3,4,11,14,20,21]:
12911                                                 break
12912                         elif exitcode in [1,3,4,11,14,20,21]:
12913                                 break
12914                         else:
12915                                 # Code 2 indicates protocol incompatibility, which is expected
12916                                 # for servers with protocol < 29 that don't support
12917                                 # --prune-empty-directories.  Retry for a server that supports
12918                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12919                                 pass
12920
12921                         retries=retries+1
12922
12923                         if retries<=maxretries:
12924                                 print ">>> Retrying..."
12925                                 time.sleep(11)
12926                         else:
12927                                 # over retries
12928                                 # exit loop
12929                                 updatecache_flg=False
12930                                 exitcode = EXCEEDED_MAX_RETRIES
12931                                 break
12932
12933                 if (exitcode==0):
12934                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12935                 elif exitcode == SERVER_OUT_OF_DATE:
12936                         sys.exit(1)
12937                 elif exitcode == EXCEEDED_MAX_RETRIES:
12938                         sys.stderr.write(
12939                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12940                         sys.exit(1)
12941                 elif (exitcode>0):
12942                         msg = []
12943                         if exitcode==1:
12944                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12945                                 msg.append("that your SYNC statement is proper.")
12946                                 msg.append("SYNC=" + settings["SYNC"])
12947                         elif exitcode==11:
12948                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12949                                 msg.append("this means your disk is full, but can be caused by corruption")
12950                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12951                                 msg.append("and try again after the problem has been fixed.")
12952                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12953                         elif exitcode==20:
12954                                 msg.append("Rsync was killed before it finished.")
12955                         else:
12956                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12957                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12958                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12959                                 msg.append("temporary problem unless complications exist with your network")
12960                                 msg.append("(and possibly your system's filesystem) configuration.")
12961                         for line in msg:
12962                                 out.eerror(line)
12963                         sys.exit(exitcode)
12964         elif syncuri[:6]=="cvs://":
12965                 if not os.path.exists("/usr/bin/cvs"):
12966                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12967                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12968                         sys.exit(1)
12969                 cvsroot=syncuri[6:]
12970                 cvsdir=os.path.dirname(myportdir)
12971                 if not os.path.exists(myportdir+"/CVS"):
12972                         #initial checkout
12973                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12974                         if os.path.exists(cvsdir+"/gentoo-x86"):
12975                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12976                                 sys.exit(1)
12977                         try:
12978                                 os.rmdir(myportdir)
12979                         except OSError, e:
12980                                 if e.errno != errno.ENOENT:
12981                                         sys.stderr.write(
12982                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12983                                         sys.exit(1)
12984                                 del e
12985                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12986                                 print "!!! cvs checkout error; exiting."
12987                                 sys.exit(1)
12988                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12989                 else:
12990                         #cvs update
12991                         print ">>> Starting cvs update with "+syncuri+"..."
12992                         retval = portage.process.spawn_bash(
12993                                 "cd %s; cvs -z0 -q update -dP" % \
12994                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12995                         if retval != os.EX_OK:
12996                                 sys.exit(retval)
12997                 dosyncuri = syncuri
12998         else:
12999                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13000                         noiselevel=-1, level=logging.ERROR)
13001                 return 1
13002
13003         if updatecache_flg and  \
13004                 myaction != "metadata" and \
13005                 "metadata-transfer" not in settings.features:
13006                 updatecache_flg = False
13007
13008         # Reload the whole config from scratch.
13009         settings, trees, mtimedb = load_emerge_config(trees=trees)
13010         root_config = trees[settings["ROOT"]]["root_config"]
13011         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13012
13013         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13014                 action_metadata(settings, portdb, myopts)
13015
13016         if portage._global_updates(trees, mtimedb["updates"]):
13017                 mtimedb.commit()
13018                 # Reload the whole config from scratch.
13019                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13020                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13021                 root_config = trees[settings["ROOT"]]["root_config"]
13022
13023         mybestpv = portdb.xmatch("bestmatch-visible",
13024                 portage.const.PORTAGE_PACKAGE_ATOM)
13025         mypvs = portage.best(
13026                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13027                 portage.const.PORTAGE_PACKAGE_ATOM))
13028
13029         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13030
13031         if myaction != "metadata":
13032                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13033                         retval = portage.process.spawn(
13034                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13035                                 dosyncuri], env=settings.environ())
13036                         if retval != os.EX_OK:
13037                                 print red(" * ")+bold("spawn failed for " + portage.USER_CONFIG_PATH + "/bin/post_sync")
13038
13039         if(mybestpv != mypvs) and not "--quiet" in myopts:
13040                 print
13041                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13042                 print red(" * ")+"that you update portage now, before any other packages are updated."
13043                 print
13044                 print red(" * ")+"To update portage, run 'emerge portage' now."
13045                 print
13046         
13047         display_news_notification(root_config, myopts)
13048         return os.EX_OK
13049
13050 def git_sync_timestamps(settings, portdir):
13051         """
13052         Since git doesn't preserve timestamps, synchronize timestamps between
13053         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13054         for a given file as long as the file in the working tree is not modified
13055         (relative to HEAD).
13056         """
13057         cache_dir = os.path.join(portdir, "metadata", "cache")
13058         if not os.path.isdir(cache_dir):
13059                 return os.EX_OK
13060         writemsg_level(">>> Synchronizing timestamps...\n")
13061
13062         from portage.cache.cache_errors import CacheError
13063         try:
13064                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13065                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13066         except CacheError, e:
13067                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13068                         level=logging.ERROR, noiselevel=-1)
13069                 return 1
13070
13071         ec_dir = os.path.join(portdir, "eclass")
13072         try:
13073                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13074                         if f.endswith(".eclass"))
13075         except OSError, e:
13076                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13077                         level=logging.ERROR, noiselevel=-1)
13078                 return 1
13079
13080         args = [portage.const.BASH_BINARY, "-c",
13081                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13082                 portage._shell_quote(portdir)]
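              # Files that git reports as modified relative to HEAD may have been edited
              # locally, so their cache timestamps cannot be trusted and are skipped below.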
13083         import subprocess
13084         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13085         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13086         rval = proc.wait()
13087         if rval != os.EX_OK:
13088                 return rval
13089
13090         modified_eclasses = set(ec for ec in ec_names \
13091                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13092
13093         updated_ec_mtimes = {}
13094
13095         for cpv in cache_db:
13096                 cpv_split = portage.catpkgsplit(cpv)
13097                 if cpv_split is None:
13098                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13099                                 level=logging.ERROR, noiselevel=-1)
13100                         continue
13101
13102                 cat, pn, ver, rev = cpv_split
13103                 cat, pf = portage.catsplit(cpv)
13104                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13105                 if relative_eb_path in modified_files:
13106                         continue
13107
13108                 try:
13109                         cache_entry = cache_db[cpv]
13110                         eb_mtime = cache_entry.get("_mtime_")
13111                         ec_mtimes = cache_entry.get("_eclasses_")
13112                 except KeyError:
13113                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13114                                 level=logging.ERROR, noiselevel=-1)
13115                         continue
13116                 except CacheError, e:
13117                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13118                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13119                         continue
13120
13121                 if eb_mtime is None:
13122                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13123                                 level=logging.ERROR, noiselevel=-1)
13124                         continue
13125
13126                 try:
13127                         eb_mtime = long(eb_mtime)
13128                 except ValueError:
13129                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13130                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13131                         continue
13132
13133                 if ec_mtimes is None:
13134                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13135                                 level=logging.ERROR, noiselevel=-1)
13136                         continue
13137
13138                 if modified_eclasses.intersection(ec_mtimes):
13139                         continue
13140
13141                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13142                 if missing_eclasses:
13143                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13144                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13145                                 noiselevel=-1)
13146                         continue
13147
13148                 eb_path = os.path.join(portdir, relative_eb_path)
13149                 try:
13150                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13151                 except OSError:
13152                         writemsg_level("!!! Missing ebuild: %s\n" % \
13153                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13154                         continue
13155
13156                 inconsistent = False
13157                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13158                         updated_mtime = updated_ec_mtimes.get(ec)
13159                         if updated_mtime is not None and updated_mtime != ec_mtime:
13160                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13161                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13162                                 inconsistent = True
13163                                 break
13164
13165                 if inconsistent:
13166                         continue
13167
13168                 if current_eb_mtime != eb_mtime:
13169                         os.utime(eb_path, (eb_mtime, eb_mtime))
13170
13171                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13172                         if ec in updated_ec_mtimes:
13173                                 continue
13174                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13175                         current_mtime = long(os.stat(ec_path).st_mtime)
13176                         if current_mtime != ec_mtime:
13177                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13178                         updated_ec_mtimes[ec] = ec_mtime
13179
13180         return os.EX_OK
13181
13182 def action_metadata(settings, portdb, myopts):
13183         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13184         old_umask = os.umask(0002)
13185         cachedir = os.path.normpath(settings.depcachedir)
13186         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13187                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13188                                         "/sys", "/tmp", "/usr",  "/var"]:
13189                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13190                         "ROOT DIRECTORY ON YOUR SYSTEM."
13191                 print >> sys.stderr, \
13192                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13193                 sys.exit(73)
13194         if not os.path.exists(cachedir):
13195                 os.mkdir(cachedir)
13196
13197         ec = portage.eclass_cache.cache(portdb.porttree_root)
13198         myportdir = os.path.realpath(settings["PORTDIR"])
13199         cm = settings.load_best_module("portdbapi.metadbmodule")(
13200                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13201
13202         from portage.cache import util
13203
13204         class percentage_noise_maker(util.quiet_mirroring):
13205                 def __init__(self, dbapi):
13206                         self.dbapi = dbapi
13207                         self.cp_all = dbapi.cp_all()
13208                         l = len(self.cp_all)
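                              # min_cp_all is roughly 1% of all packages; each time that many have
                              # been yielded, __iter__ drops call_update_min to 0 so the cache
                              # mirroring code calls update() and the on-screen percentage advances.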
13209                         self.call_update_min = 100000000
13210                         self.min_cp_all = l/100.0
13211                         self.count = 1
13212                         self.pstr = ''
13213
13214                 def __iter__(self):
13215                         for x in self.cp_all:
13216                                 self.count += 1
13217                                 if self.count > self.min_cp_all:
13218                                         self.call_update_min = 0
13219                                         self.count = 0
13220                                 for y in self.dbapi.cp_list(x):
13221                                         yield y
13222                         self.call_update_min = 0
13223
13224                 def update(self, *arg):
13225                         try:                            self.pstr = int(self.pstr) + 1
13226                         except ValueError:      self.pstr = 1
13227                         sys.stdout.write("%s%i%%" % \
13228                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13229                         sys.stdout.flush()
13230                         self.call_update_min = 10000000
13231
13232                 def finish(self, *arg):
13233                         sys.stdout.write("\b\b\b\b100%\n")
13234                         sys.stdout.flush()
13235
13236         if "--quiet" in myopts:
13237                 def quicky_cpv_generator(cp_all_list):
13238                         for x in cp_all_list:
13239                                 for y in portdb.cp_list(x):
13240                                         yield y
13241                 source = quicky_cpv_generator(portdb.cp_all())
13242                 noise_maker = portage.cache.util.quiet_mirroring()
13243         else:
13244                 noise_maker = source = percentage_noise_maker(portdb)
13245         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13246                 eclass_cache=ec, verbose_instance=noise_maker)
13247
13248         sys.stdout.flush()
13249         os.umask(old_umask)
13250
13251 def action_regen(settings, portdb, max_jobs, max_load):
13252         xterm_titles = "notitles" not in settings.features
13253         emergelog(xterm_titles, " === regen")
13254         #regenerate cache entries
13255         portage.writemsg_stdout("Regenerating cache entries...\n")
13256         try:
13257                 os.close(sys.stdin.fileno())
13258         except SystemExit, e:
13259                 raise # Needed else can't exit
13260         except:
13261                 pass
13262         sys.stdout.flush()
13263
13264         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13265         regen.run()
13266
13267         portage.writemsg_stdout("done!\n")
13268         return regen.returncode
13269
13270 def action_config(settings, trees, myopts, myfiles):
13271         if len(myfiles) != 1:
13272                 print red("!!! config can only take a single package atom at this time\n")
13273                 sys.exit(1)
13274         if not is_valid_package_atom(myfiles[0]):
13275                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13276                         noiselevel=-1)
13277                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13278                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13279                 sys.exit(1)
13280         print
13281         try:
13282                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13283         except portage.exception.AmbiguousPackageName, e:
13284                 # Multiple matches thrown from cpv_expand
13285                 pkgs = e.args[0]
13286         if len(pkgs) == 0:
13287                 print "No packages found.\n"
13288                 sys.exit(0)
13289         elif len(pkgs) > 1:
13290                 if "--ask" in myopts:
13291                         options = []
13292                         print "Please select a package to configure:"
13293                         idx = 0
13294                         for pkg in pkgs:
13295                                 idx += 1
13296                                 options.append(str(idx))
13297                                 print options[-1]+") "+pkg
13298                         print "X) Cancel"
13299                         options.append("X")
13300                         idx = userquery("Selection?", options)
13301                         if idx == "X":
13302                                 sys.exit(0)
13303                         pkg = pkgs[int(idx)-1]
13304                 else:
13305                         print "The following packages are available:"
13306                         for pkg in pkgs:
13307                                 print "* "+pkg
13308                         print "\nPlease use a specific atom or the --ask option."
13309                         sys.exit(1)
13310         else:
13311                 pkg = pkgs[0]
13312
13313         print
13314         if "--ask" in myopts:
13315                 if userquery("Ready to configure "+pkg+"?") == "No":
13316                         sys.exit(0)
13317         else:
13318                 print "Configuring %s..." % pkg
13319         print
13320         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13321         mysettings = portage.config(clone=settings)
13322         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13323         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13324         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13325                 mysettings,
13326                 debug=debug, cleanup=True,
13327                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13328         if retval == os.EX_OK:
13329                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13330                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13331         print
13332
13333 def action_info(settings, trees, myopts, myfiles):
13334         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13335                 settings.profile_path, settings["CHOST"],
13336                 trees[settings["ROOT"]]["vartree"].dbapi)
13337         header_width = 65
13338         header_title = "System Settings"
13339         if myfiles:
13340                 print header_width * "="
13341                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13342         print header_width * "="
13343         print "System uname: "+platform.platform(aliased=1)
13344
13345         lastSync = portage.grabfile(os.path.join(
13346                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13347         print "Timestamp of tree:",
13348         if lastSync:
13349                 print lastSync[0]
13350         else:
13351                 print "Unknown"
13352
13353         output = commands.getstatusoutput("distcc --version")
13354         if not output[0]:
13355                 print str(output[1].split("\n",1)[0]),
13356                 if "distcc" in settings.features:
13357                         print "[enabled]"
13358                 else:
13359                         print "[disabled]"
13360
13361         output = commands.getstatusoutput("ccache -V")
13362         if not output[0]:
13363                 print str(output[1].split("\n",1)[0]),
13364                 if "ccache" in settings.features:
13365                         print "[enabled]"
13366                 else:
13367                         print "[disabled]"
13368
13369         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13370                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13371         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13372         myvars  = portage.util.unique_array(myvars)
13373         myvars.sort()
13374
13375         for x in myvars:
13376                 if portage.isvalidatom(x):
13377                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13378                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13379                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13380                         pkgs = []
13381                         for pn, ver, rev in pkg_matches:
13382                                 if rev != "r0":
13383                                         pkgs.append(ver + "-" + rev)
13384                                 else:
13385                                         pkgs.append(ver)
13386                         if pkgs:
13387                                 pkgs = ", ".join(pkgs)
13388                                 print "%-20s %s" % (x+":", pkgs)
13389                 else:
13390                         print "%-20s %s" % (x+":", "[NOT VALID]")
13391
13392         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13393
13394         if "--verbose" in myopts:
13395                 myvars = settings.keys()
13396         else:
13397                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13398                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13399                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13400                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13401
13402                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13403
13404         myvars = portage.util.unique_array(myvars)
13405         unset_vars = []
13406         myvars.sort()
13407         for x in myvars:
13408                 if x in settings:
13409                         if x != "USE":
13410                                 print '%s="%s"' % (x, settings[x])
13411                         else:
13412                                 use = set(settings["USE"].split())
13413                                 use_expand = settings["USE_EXPAND"].split()
13414                                 use_expand.sort()
13415                                 for varname in use_expand:
13416                                         flag_prefix = varname.lower() + "_"
13417                                         for f in list(use):
13418                                                 if f.startswith(flag_prefix):
13419                                                         use.remove(f)
13420                                 use = list(use)
13421                                 use.sort()
13422                                 print 'USE="%s"' % " ".join(use),
13423                                 for varname in use_expand:
13424                                         myval = settings.get(varname)
13425                                         if myval:
13426                                                 print '%s="%s"' % (varname, myval),
13427                                 print
13428                 else:
13429                         unset_vars.append(x)
13430         if unset_vars:
13431                 print "Unset:  "+", ".join(unset_vars)
13432         print
13433
13434         if "--debug" in myopts:
13435                 for x in dir(portage):
13436                         module = getattr(portage, x)
13437                         if "cvs_id_string" in dir(module):
13438                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13439
13440         # See if we can find any packages installed matching the strings
13441         # passed on the command line
13442         mypkgs = []
13443         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13444         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13445         for x in myfiles:
13446                 mypkgs.extend(vardb.match(x))
13447
13448         # If some packages were found...
13449         if mypkgs:
13450                 # Get our global settings (we only print stuff if it varies from
13451                 # the current config)
13452                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13453                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13454                 global_vals = {}
13455                 pkgsettings = portage.config(clone=settings)
13456
13457                 for myvar in mydesiredvars:
13458                         global_vals[myvar] = set(settings.get(myvar, "").split())
13459
13460                 # Loop through each package
13461                 # Only print settings if they differ from global settings
13462                 header_title = "Package Settings"
13463                 print header_width * "="
13464                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13465                 print header_width * "="
13466                 from portage.output import EOutput
13467                 out = EOutput()
13468                 for pkg in mypkgs:
13469                         # Get all package specific variables
13470                         auxvalues = vardb.aux_get(pkg, auxkeys)
13471                         valuesmap = {}
13472                         for i in xrange(len(auxkeys)):
13473                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13474                         diff_values = {}
13475                         for myvar in mydesiredvars:
13476                                 # If the package variable doesn't match the
13477                                 # current global variable, something has changed
13478                                 # so set diff_found so we know to print
13479                                 if valuesmap[myvar] != global_vals[myvar]:
13480                                         diff_values[myvar] = valuesmap[myvar]
13481                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13482                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13483                         pkgsettings.reset()
13484                         # If a matching ebuild is no longer available in the tree, maybe it
13485                         # would make sense to compare against the flags for the best
13486                         # available version with the same slot?
13487                         mydb = None
13488                         if portdb.cpv_exists(pkg):
13489                                 mydb = portdb
13490                         pkgsettings.setcpv(pkg, mydb=mydb)
13491                         if valuesmap["IUSE"].intersection(
13492                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13493                                 diff_values["USE"] = valuesmap["USE"]
13494                         # If a difference was found, print the info for
13495                         # this package.
13496                         if diff_values:
13497                                 # Print package info
13498                                 print "%s was built with the following:" % pkg
13499                                 for myvar in mydesiredvars + ["USE"]:
13500                                         if myvar in diff_values:
13501                                                 mylist = list(diff_values[myvar])
13502                                                 mylist.sort()
13503                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13504                                 print
13505                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13506                         ebuildpath = vardb.findname(pkg)
13507                         if not ebuildpath or not os.path.exists(ebuildpath):
13508                                 out.ewarn("No ebuild found for '%s'" % pkg)
13509                                 continue
13510                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13511                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13512                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13513                                 tree="vartree")
13514
13515 def action_search(root_config, myopts, myfiles, spinner):
13516         if not myfiles:
13517                 print "emerge: no search terms provided."
13518         else:
13519                 searchinstance = search(root_config,
13520                         spinner, "--searchdesc" in myopts,
13521                         "--quiet" not in myopts, "--usepkg" in myopts,
13522                         "--usepkgonly" in myopts)
13523                 for mysearch in myfiles:
13524                         try:
13525                                 searchinstance.execute(mysearch)
13526                         except re.error, comment:
13527                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13528                                 sys.exit(1)
13529                         searchinstance.output()
13530
13531 def action_depclean(settings, trees, ldpath_mtimes,
13532         myopts, action, myfiles, spinner):
13533         # Remove packages that are neither explicitly merged nor required as a
13534         # dependency of another package. The world file defines what is explicit.
13535
13536         # Global depclean or prune operations are not very safe when there are
13537         # missing dependencies since it's unknown how badly incomplete
13538         # the dependency graph is, and we might accidentally remove packages
13539         # that should have been pulled into the graph. On the other hand, it's
13540         # relatively safe to ignore missing deps when only asked to remove
13541         # specific packages.
13542         allow_missing_deps = len(myfiles) > 0
13543
13544         msg = []
13545         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13546         msg.append("mistakes. Packages that are part of the world set will always\n")
13547         msg.append("be kept.  They can be manually added to this set with\n")
13548         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13549         msg.append("package.provided (see portage(5)) will be removed by\n")
13550         msg.append("depclean, even if they are part of the world set.\n")
13551         msg.append("\n")
13552         msg.append("As a safety measure, depclean will not remove any packages\n")
13553         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13554         msg.append("consequence, it is often necessary to run %s\n" % \
13555                 good("`emerge --update"))
13556         msg.append(good("--newuse --deep @system @world`") + \
13557                 " prior to depclean.\n")
13558
13559         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13560                 portage.writemsg_stdout("\n")
13561                 for x in msg:
13562                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13563
13564         xterm_titles = "notitles" not in settings.features
13565         myroot = settings["ROOT"]
13566         root_config = trees[myroot]["root_config"]
13567         getSetAtoms = root_config.setconfig.getSetAtoms
13568         vardb = trees[myroot]["vartree"].dbapi
13569
13570         required_set_names = ("system", "world")
13571         required_sets = {}
13572         set_args = []
13573
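              # Packages matched by the system and world sets (and everything they
              # depend on) must never be removed; these sets seed the dependency
              # graph built below.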
13574         for s in required_set_names:
13575                 required_sets[s] = InternalPackageSet(
13576                         initial_atoms=getSetAtoms(s))
13577
13578         
13579         # When removing packages, use a temporary version of world
13580         # which excludes packages that are intended to be eligible for
13581         # removal.
13582         world_temp_set = required_sets["world"]
13583         system_set = required_sets["system"]
13584
13585         if not system_set or not world_temp_set:
13586
13587                 if not system_set:
13588                         writemsg_level("!!! You have no system list.\n",
13589                                 level=logging.ERROR, noiselevel=-1)
13590
13591                 if not world_temp_set:
13592                         writemsg_level("!!! You have no world file.\n",
13593                                         level=logging.WARNING, noiselevel=-1)
13594
13595                 writemsg_level("!!! Proceeding is likely to " + \
13596                         "break your installation.\n",
13597                         level=logging.WARNING, noiselevel=-1)
13598                 if "--pretend" not in myopts:
13599                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13600
13601         if action == "depclean":
13602                 emergelog(xterm_titles, " >>> depclean")
13603
13604         import textwrap
13605         args_set = InternalPackageSet()
13606         if myfiles:
13607                 for x in myfiles:
13608                         if not is_valid_package_atom(x):
13609                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13610                                         level=logging.ERROR, noiselevel=-1)
13611                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13612                                 return
13613                         try:
13614                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13615                         except portage.exception.AmbiguousPackageName, e:
13616                                 msg = "The short ebuild name \"" + x + \
13617                                         "\" is ambiguous.  Please specify " + \
13618                                         "one of the following " + \
13619                                         "fully-qualified ebuild names instead:"
13620                                 for line in textwrap.wrap(msg, 70):
13621                                         writemsg_level("!!! %s\n" % (line,),
13622                                                 level=logging.ERROR, noiselevel=-1)
13623                                 for i in e[0]:
13624                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13625                                                 level=logging.ERROR, noiselevel=-1)
13626                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13627                                 return
13628                         args_set.add(atom)
13629                 matched_packages = False
13630                 for x in args_set:
13631                         if vardb.match(x):
13632                                 matched_packages = True
13633                                 break
13634                 if not matched_packages:
13635                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13636                                 action)
13637                         return
13638
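              # The resolver is created with the parameter set used for removal
              # actions; the system/world sets injected further down determine which
              # installed packages have to stay.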
13639         writemsg_level("\nCalculating dependencies  ")
13640         resolver_params = create_depgraph_params(myopts, "remove")
13641         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13642         vardb = resolver.trees[myroot]["vartree"].dbapi
13643
13644         if action == "depclean":
13645
13646                 if args_set:
13647                         # Pull in everything that's installed but not matched
13648                         # by an argument atom since we don't want to clean any
13649                         # package if something depends on it.
13650
13651                         world_temp_set.clear()
13652                         for pkg in vardb:
13653                                 spinner.update()
13654
13655                                 try:
13656                                         if args_set.findAtomForPackage(pkg) is None:
13657                                                 world_temp_set.add("=" + pkg.cpv)
13658                                                 continue
13659                                 except portage.exception.InvalidDependString, e:
13660                                         show_invalid_depstring_notice(pkg,
13661                                                 pkg.metadata["PROVIDE"], str(e))
13662                                         del e
13663                                         world_temp_set.add("=" + pkg.cpv)
13664                                         continue
13665
13666         elif action == "prune":
13667
13668                 # Pull in everything that's installed since we don't want
13669                 # to prune a package if something depends on it.
13670                 world_temp_set.clear()
13671                 world_temp_set.update(vardb.cp_all())
13672
13673                 if not args_set:
13674
13675                         # Try to prune everything that's slotted.
13676                         for cp in vardb.cp_all():
13677                                 if len(vardb.cp_list(cp)) > 1:
13678                                         args_set.add(cp)
13679
13680                 # Remove atoms from world that match installed packages
13681                 # that are also matched by argument atoms, but do not remove
13682                 # them if they match the highest installed version.
13683                 for pkg in vardb:
13684                         spinner.update()
13685                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13686                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13687                                 raise AssertionError("package expected in matches: " + \
13688                                         "cp = %s, cpv = %s matches = %s" % \
13689                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13690
13691                         highest_version = pkgs_for_cp[-1]
13692                         if pkg == highest_version:
13693                                 # pkg is the highest version
13694                                 world_temp_set.add("=" + pkg.cpv)
13695                                 continue
13696
13697                         if len(pkgs_for_cp) <= 1:
13698                                 raise AssertionError("more packages expected: " + \
13699                                         "cp = %s, cpv = %s matches = %s" % \
13700                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13701
13702                         try:
13703                                 if args_set.findAtomForPackage(pkg) is None:
13704                                         world_temp_set.add("=" + pkg.cpv)
13705                                         continue
13706                         except portage.exception.InvalidDependString, e:
13707                                 show_invalid_depstring_notice(pkg,
13708                                         pkg.metadata["PROVIDE"], str(e))
13709                                 del e
13710                                 world_temp_set.add("=" + pkg.cpv)
13711                                 continue
13712
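              # Feed the (possibly trimmed) system and world sets to the resolver as
              # root dependencies; _complete_graph() then pulls in everything they
              # require, and any installed package left outside the graph becomes a
              # removal candidate.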
13713         set_args = {}
13714         for s, package_set in required_sets.iteritems():
13715                 set_atom = SETPREFIX + s
13716                 set_arg = SetArg(arg=set_atom, set=package_set,
13717                         root_config=resolver.roots[myroot])
13718                 set_args[s] = set_arg
13719                 for atom in set_arg.set:
13720                         resolver._dep_stack.append(
13721                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13722                         resolver.digraph.add(set_arg, None)
13723
13724         success = resolver._complete_graph()
13725         writemsg_level("\b\b... done!\n")
13726
13727         resolver.display_problems()
13728
13729         if not success:
13730                 return 1
13731
13732         def unresolved_deps():
13733
13734                 unresolvable = set()
13735                 for dep in resolver._initially_unsatisfied_deps:
13736                         if isinstance(dep.parent, Package) and \
13737                                 (dep.priority > UnmergeDepPriority.SOFT):
13738                                 unresolvable.add((dep.atom, dep.parent.cpv))
13739
13740                 if not unresolvable:
13741                         return False
13742
13743                 if unresolvable and not allow_missing_deps:
13744                         prefix = bad(" * ")
13745                         msg = []
13746                         msg.append("Dependencies could not be completely resolved due to")
13747                         msg.append("the following required packages not being installed:")
13748                         msg.append("")
13749                         for atom, parent in unresolvable:
13750                                 msg.append("  %s pulled in by:" % (atom,))
13751                                 msg.append("    %s" % (parent,))
13752                                 msg.append("")
13753                         msg.append("Have you forgotten to run " + \
13754                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13755                         msg.append(("to %s? It may be necessary to manually " + \
13756                                 "uninstall packages that no longer") % action)
13757                         msg.append("exist in the portage tree since " + \
13758                                 "it may not be possible to satisfy their")
13759                         msg.append("dependencies.  Also, be aware of " + \
13760                                 "the --with-bdeps option that is documented")
13761                         msg.append("in " + good("`man emerge`") + ".")
13762                         if action == "prune":
13763                                 msg.append("")
13764                                 msg.append("If you would like to ignore " + \
13765                                         "dependencies then use %s." % good("--nodeps"))
13766                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13767                                 level=logging.ERROR, noiselevel=-1)
13768                         return True
13769                 return False
13770
13771         if unresolved_deps():
13772                 return 1
13773
13774         graph = resolver.digraph.copy()
13775         required_pkgs_total = 0
13776         for node in graph:
13777                 if isinstance(node, Package):
13778                         required_pkgs_total += 1
13779
13780         def show_parents(child_node):
13781                 parent_nodes = graph.parent_nodes(child_node)
13782                 if not parent_nodes:
13783                         # With --prune, the highest version can be pulled in without any
13784                         # real parent since all installed packages are pulled in.  In that
13785                         # case there's nothing to show here.
13786                         return
13787                 parent_strs = []
13788                 for node in parent_nodes:
13789                         parent_strs.append(str(getattr(node, "cpv", node)))
13790                 parent_strs.sort()
13791                 msg = []
13792                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13793                 for parent_str in parent_strs:
13794                         msg.append("    %s\n" % (parent_str,))
13795                 msg.append("\n")
13796                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13797
13798         def cmp_pkg_cpv(pkg1, pkg2):
13799                 """Sort Package instances by cpv."""
13800                 if pkg1.cpv > pkg2.cpv:
13801                         return 1
13802                 elif pkg1.cpv == pkg2.cpv:
13803                         return 0
13804                 else:
13805                         return -1
13806
13807         def create_cleanlist():
13808                 pkgs_to_remove = []
13809
13810                 if action == "depclean":
13811                         if args_set:
13812
13813                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13814                                         arg_atom = None
13815                                         try:
13816                                                 arg_atom = args_set.findAtomForPackage(pkg)
13817                                         except portage.exception.InvalidDependString:
13818                                                 # this error has already been displayed by now
13819                                                 continue
13820
13821                                         if arg_atom:
13822                                                 if pkg not in graph:
13823                                                         pkgs_to_remove.append(pkg)
13824                                                 elif "--verbose" in myopts:
13825                                                         show_parents(pkg)
13826
13827                         else:
13828                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13829                                         if pkg not in graph:
13830                                                 pkgs_to_remove.append(pkg)
13831                                         elif "--verbose" in myopts:
13832                                                 show_parents(pkg)
13833
13834                 elif action == "prune":
13835                         # Prune really uses all installed instead of world. It's not
13836                         # a real reverse dependency so don't display it as such.
13837                         graph.remove(set_args["world"])
13838
13839                         for atom in args_set:
13840                                 for pkg in vardb.match_pkgs(atom):
13841                                         if pkg not in graph:
13842                                                 pkgs_to_remove.append(pkg)
13843                                         elif "--verbose" in myopts:
13844                                                 show_parents(pkg)
13845
13846                 if not pkgs_to_remove:
13847                         writemsg_level(
13848                                 ">>> No packages selected for removal by %s\n" % action)
13849                         if "--verbose" not in myopts:
13850                                 writemsg_level(
13851                                         ">>> To see reverse dependencies, use %s\n" % \
13852                                                 good("--verbose"))
13853                         if action == "prune":
13854                                 writemsg_level(
13855                                         ">>> To ignore dependencies, use %s\n" % \
13856                                                 good("--nodeps"))
13857
13858                 return pkgs_to_remove
13859
13860         cleanlist = create_cleanlist()
13861
13862         if len(cleanlist):
13863                 clean_set = set(cleanlist)
13864
13865                 # Check if any of these packages are the sole providers of libraries
13866                 # with consumers that have not been selected for removal. If so, these
13867                 # packages and any dependencies need to be added to the graph.
13868                 real_vardb = trees[myroot]["vartree"].dbapi
13869                 linkmap = real_vardb.linkmap
13870                 liblist = linkmap.listLibraryObjects()
13871                 consumer_cache = {}
13872                 provider_cache = {}
13873                 soname_cache = {}
13874                 consumer_map = {}
13875
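                      # For each removal candidate, record the libraries it owns and the
                      # files that still link against them (together with the providers
                      # the LinkageMap reports for each consumer), as
                      # consumer_map: pkg -> {library: [(consumer, providers), ...]}.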
13876                 writemsg_level(">>> Checking for lib consumers...\n")
13877
13878                 for pkg in cleanlist:
13879                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13880                         provided_libs = set()
13881
13882                         for lib in liblist:
13883                                 if pkg_dblink.isowner(lib, myroot):
13884                                         provided_libs.add(lib)
13885
13886                         if not provided_libs:
13887                                 continue
13888
13889                         consumers = {}
13890                         for lib in provided_libs:
13891                                 lib_consumers = consumer_cache.get(lib)
13892                                 if lib_consumers is None:
13893                                         lib_consumers = linkmap.findConsumers(lib)
13894                                         consumer_cache[lib] = lib_consumers
13895                                 if lib_consumers:
13896                                         consumers[lib] = lib_consumers
13897
13898                         if not consumers:
13899                                 continue
13900
13901                         for lib, lib_consumers in consumers.items():
13902                                 for consumer_file in list(lib_consumers):
13903                                         if pkg_dblink.isowner(consumer_file, myroot):
13904                                                 lib_consumers.remove(consumer_file)
13905                                 if not lib_consumers:
13906                                         del consumers[lib]
13907
13908                         if not consumers:
13909                                 continue
13910
13911                         for lib, lib_consumers in consumers.iteritems():
13912
13913                                 soname = soname_cache.get(lib)
13914                                 if soname is None:
13915                                         soname = linkmap.getSoname(lib)
13916                                         soname_cache[lib] = soname
13917
13918                                 consumer_providers = []
13919                                 for lib_consumer in lib_consumers:
13920                                         providers = provider_cache.get(lib_consumer)
13921                                         if providers is None:
13922                                                 providers = linkmap.findProviders(lib_consumer)
13923                                                 provider_cache[lib_consumer] = providers
13924                                         if soname not in providers:
13925                                                 # Why does this happen?
13926                                                 continue
13927                                         consumer_providers.append(
13928                                                 (lib_consumer, providers[soname]))
13929
13930                                 consumers[lib] = consumer_providers
13931
13932                         consumer_map[pkg] = consumers
13933
13934                 if consumer_map:
13935
13936                         search_files = set()
13937                         for consumers in consumer_map.itervalues():
13938                                 for lib, consumer_providers in consumers.iteritems():
13939                                         for lib_consumer, providers in consumer_providers:
13940                                                 search_files.add(lib_consumer)
13941                                                 search_files.update(providers)
13942
13943                         writemsg_level(">>> Assigning files to packages...\n")
13944                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13945
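                              # Keep only consumers that survive the unmerge: a consumer is
                              # dropped if it is itself in the clean set, or if some provider
                              # of the same library remains installed outside the clean set.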
13946                         for pkg, consumers in consumer_map.items():
13947                                 for lib, consumer_providers in consumers.items():
13948                                         lib_consumers = set()
13949
13950                                         for lib_consumer, providers in consumer_providers:
13951                                                 owner_set = file_owners.get(lib_consumer)
13952                                                 provider_dblinks = set()
13953                                                 provider_pkgs = set()
13954
13955                                                 if len(providers) > 1:
13956                                                         for provider in providers:
13957                                                                 provider_set = file_owners.get(provider)
13958                                                                 if provider_set is not None:
13959                                                                         provider_dblinks.update(provider_set)
13960
13961                                                 if len(provider_dblinks) > 1:
13962                                                         for provider_dblink in provider_dblinks:
13963                                                                 pkg_key = ("installed", myroot,
13964                                                                         provider_dblink.mycpv, "nomerge")
13965                                                                 if pkg_key not in clean_set:
13966                                                                         provider_pkgs.add(vardb.get(pkg_key))
13967
13968                                                 if provider_pkgs:
13969                                                         continue
13970
13971                                                 if owner_set is not None:
13972                                                         lib_consumers.update(owner_set)
13973
13974                                         for consumer_dblink in list(lib_consumers):
13975                                                 if ("installed", myroot, consumer_dblink.mycpv,
13976                                                         "nomerge") in clean_set:
13977                                                         lib_consumers.remove(consumer_dblink)
13978                                                         continue
13979
13980                                         if lib_consumers:
13981                                                 consumers[lib] = lib_consumers
13982                                         else:
13983                                                 del consumers[lib]
13984                                 if not consumers:
13985                                         del consumer_map[pkg]
13986
13987                 if consumer_map:
13988                         # TODO: Implement a package set for rebuilding consumer packages.
13989
13990                         msg = "In order to avoid breakage of link level " + \
13991                                 "dependencies, one or more packages will not be removed. " + \
13992                                 "This can be solved by rebuilding " + \
13993                                 "the packages that pulled them in."
13994
13995                         prefix = bad(" * ")
13996                         from textwrap import wrap
13997                         writemsg_level("".join(prefix + "%s\n" % line for \
13998                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13999
14000                         msg = []
14001                         for pkg, consumers in consumer_map.iteritems():
14002                                 unique_consumers = set(chain(*consumers.values()))
14003                                 unique_consumers = sorted(consumer.mycpv \
14004                                         for consumer in unique_consumers)
14005                                 msg.append("")
14006                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14007                                 for consumer in unique_consumers:
14008                                         msg.append("    %s" % (consumer,))
14009                         msg.append("")
14010                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14011                                 level=logging.WARNING, noiselevel=-1)
14012
14013                         # Add lib providers to the graph as children of lib consumers,
14014                         # and also add any dependencies pulled in by the provider.
14015                         writemsg_level(">>> Adding lib providers to graph...\n")
14016
14017                         for pkg, consumers in consumer_map.iteritems():
14018                                 for consumer_dblink in set(chain(*consumers.values())):
14019                                         consumer_pkg = vardb.get(("installed", myroot,
14020                                                 consumer_dblink.mycpv, "nomerge"))
14021                                         if not resolver._add_pkg(pkg,
14022                                                 Dependency(parent=consumer_pkg,
14023                                                 priority=UnmergeDepPriority(runtime=True),
14024                                                 root=pkg.root)):
14025                                                 resolver.display_problems()
14026                                                 return 1
14027
14028                         writemsg_level("\nCalculating dependencies  ")
14029                         success = resolver._complete_graph()
14030                         writemsg_level("\b\b... done!\n")
14031                         resolver.display_problems()
14032                         if not success:
14033                                 return 1
14034                         if unresolved_deps():
14035                                 return 1
14036
14037                         graph = resolver.digraph.copy()
14038                         required_pkgs_total = 0
14039                         for node in graph:
14040                                 if isinstance(node, Package):
14041                                         required_pkgs_total += 1
14042                         cleanlist = create_cleanlist()
14043                         if not cleanlist:
14044                                 return 0
14045                         clean_set = set(cleanlist)
14046
14047                 # Use a topological sort to create an unmerge order such that
14048                 # each package is unmerged before its dependencies. This is
14049                 # necessary to avoid breaking things that may need to run
14050                 # during pkg_prerm or pkg_postrm phases.
14051
14052                 # Create a new graph to account for dependencies between the
14053                 # packages being unmerged.
14054                 graph = digraph()
14055                 del cleanlist[:]
14056
14057                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14058                 runtime = UnmergeDepPriority(runtime=True)
14059                 runtime_post = UnmergeDepPriority(runtime_post=True)
14060                 buildtime = UnmergeDepPriority(buildtime=True)
14061                 priority_map = {
14062                         "RDEPEND": runtime,
14063                         "PDEPEND": runtime_post,
14064                         "DEPEND": buildtime,
14065                 }
14066
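                      # Build edges between packages being unmerged: for every *DEPEND
                      # atom of a node that matches another member of the clean set, add
                      # an edge whose priority records the dependency type, so softer
                      # (build-time) edges can be ignored first if cycles must be broken.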
14067                 for node in clean_set:
14068                         graph.add(node, None)
14069                         mydeps = []
14070                         node_use = node.metadata["USE"].split()
14071                         for dep_type in dep_keys:
14072                                 depstr = node.metadata[dep_type]
14073                                 if not depstr:
14074                                         continue
14075                                 try:
14076                                         portage.dep._dep_check_strict = False
14077                                         success, atoms = portage.dep_check(depstr, None, settings,
14078                                                 myuse=node_use, trees=resolver._graph_trees,
14079                                                 myroot=myroot)
14080                                 finally:
14081                                         portage.dep._dep_check_strict = True
14082                                 if not success:
14083                                         # Ignore invalid deps of packages that will
14084                                         # be uninstalled anyway.
14085                                         continue
14086
14087                                 priority = priority_map[dep_type]
14088                                 for atom in atoms:
14089                                         if not isinstance(atom, portage.dep.Atom):
14090                                                 # Ignore invalid atoms returned from dep_check().
14091                                                 continue
14092                                         if atom.blocker:
14093                                                 continue
14094                                         matches = vardb.match_pkgs(atom)
14095                                         if not matches:
14096                                                 continue
14097                                         for child_node in matches:
14098                                                 if child_node in clean_set:
14099                                                         graph.add(child_node, node, priority=priority)
14100
14101                 ordered = True
14102                 if len(graph.order) == len(graph.root_nodes()):
14103                         # If there are no dependencies between packages
14104                         # let unmerge() group them by cat/pn.
14105                         ordered = False
14106                         cleanlist = [pkg.cpv for pkg in graph.order]
14107                 else:
14108                         # Order nodes from lowest to highest overall reference count for
14109                         # optimal root node selection.
14110                         node_refcounts = {}
14111                         for node in graph.order:
14112                                 node_refcounts[node] = len(graph.parent_nodes(node))
14113                         def cmp_reference_count(node1, node2):
14114                                 return node_refcounts[node1] - node_refcounts[node2]
14115                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14116         
14117                         ignore_priority_range = [None]
14118                         ignore_priority_range.extend(
14119                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
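                              # Repeatedly peel off the current root nodes; when none exist
                              # the graph still has cycles, so raise the ignore threshold
                              # until a node becomes removable with some edges disregarded.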
14120                         while not graph.empty():
14121                                 for ignore_priority in ignore_priority_range:
14122                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14123                                         if nodes:
14124                                                 break
14125                                 if not nodes:
14126                                         raise AssertionError("no root nodes")
14127                                 if ignore_priority is not None:
14128                                         # Some deps have been dropped due to circular dependencies,
14129                                         # so only pop one node in order to minimize the number that
14130                                         # are dropped.
14131                                         del nodes[1:]
14132                                 for node in nodes:
14133                                         graph.remove(node)
14134                                         cleanlist.append(node.cpv)
14135
14136                 unmerge(root_config, myopts, "unmerge", cleanlist,
14137                         ldpath_mtimes, ordered=ordered)
14138
14139         if action == "prune":
14140                 return
14141
14142         if not cleanlist and "--quiet" in myopts:
14143                 return
14144
14145         print "Packages installed:   "+str(len(vardb.cpv_all()))
14146         print "Packages in world:    " + \
14147                 str(len(root_config.sets["world"].getAtoms()))
14148         print "Packages in system:   " + \
14149                 str(len(root_config.sets["system"].getAtoms()))
14150         print "Required packages:    "+str(required_pkgs_total)
14151         if "--pretend" in myopts:
14152                 print "Number to remove:     "+str(len(cleanlist))
14153         else:
14154                 print "Number removed:       "+str(len(cleanlist))
14155
14156 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14157         """
14158         Construct a depgraph for the given resume list. This will raise
14159         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14160         @rtype: tuple
14161         @returns: (success, depgraph, dropped_tasks)
14162         """
14163         skip_masked = True
14164         skip_unsatisfied = True
14165         mergelist = mtimedb["resume"]["mergelist"]
14166         dropped_tasks = set()
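              # Keep rebuilding the resume graph, pruning merge-list entries whose
              # dependencies can no longer be satisfied, until the resume list loads
              # cleanly (or an unrecoverable UnsatisfiedResumeDep is raised).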
14167         while True:
14168                 mydepgraph = depgraph(settings, trees,
14169                         myopts, myparams, spinner)
14170                 try:
14171                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14172                                 skip_masked=skip_masked)
14173                 except depgraph.UnsatisfiedResumeDep, e:
14174                         if not skip_unsatisfied:
14175                                 raise
14176
14177                         graph = mydepgraph.digraph
14178                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14179                                 for dep in e.value)
14180                         traversed_nodes = set()
14181                         unsatisfied_stack = list(unsatisfied_parents)
14182                         while unsatisfied_stack:
14183                                 pkg = unsatisfied_stack.pop()
14184                                 if pkg in traversed_nodes:
14185                                         continue
14186                                 traversed_nodes.add(pkg)
14187
14188                                 # If this package was pulled in by a parent
14189                                 # package scheduled for merge, removing this
14190                                 # package may cause the parent package's
14191                                 # dependency to become unsatisfied.
14192                                 for parent_node in graph.parent_nodes(pkg):
14193                                         if not isinstance(parent_node, Package) \
14194                                                 or parent_node.operation not in ("merge", "nomerge"):
14195                                                 continue
14196                                         unsatisfied = \
14197                                                 graph.child_nodes(parent_node,
14198                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14199                                         if pkg in unsatisfied:
14200                                                 unsatisfied_parents[parent_node] = parent_node
14201                                                 unsatisfied_stack.append(parent_node)
14202
14203                         pruned_mergelist = []
14204                         for x in mergelist:
14205                                 if isinstance(x, list) and \
14206                                         tuple(x) not in unsatisfied_parents:
14207                                         pruned_mergelist.append(x)
14208
14209                         # If the mergelist doesn't shrink then this loop is infinite.
14210                         if len(pruned_mergelist) == len(mergelist):
14211                                 # This happens if a package can't be dropped because
14212                                 # it's already installed, but it has unsatisfied PDEPEND.
14213                                 raise
14214                         mergelist[:] = pruned_mergelist
14215
14216                         # Exclude installed packages that have been removed from the graph due
14217                         # to failure to build/install runtime dependencies after the dependent
14218                         # package has already been installed.
14219                         dropped_tasks.update(pkg for pkg in \
14220                                 unsatisfied_parents if pkg.operation != "nomerge")
14221                         mydepgraph.break_refs(unsatisfied_parents)
14222
14223                         del e, graph, traversed_nodes, \
14224                                 unsatisfied_parents, unsatisfied_stack
14225                         continue
14226                 else:
14227                         break
14228         return (success, mydepgraph, dropped_tasks)
14229
14230 def action_build(settings, trees, mtimedb,
14231         myopts, myaction, myfiles, spinner):
14232
14233         # validate the state of the resume data
14234         # so that we can make assumptions later.
14235         for k in ("resume", "resume_backup"):
14236                 if k not in mtimedb:
14237                         continue
14238                 resume_data = mtimedb[k]
14239                 if not isinstance(resume_data, dict):
14240                         del mtimedb[k]
14241                         continue
14242                 mergelist = resume_data.get("mergelist")
14243                 if not isinstance(mergelist, list):
14244                         del mtimedb[k]
14245                         continue
14246                 for x in mergelist:
14247                         if not (isinstance(x, list) and len(x) == 4):
14248                                 continue
14249                         pkg_type, pkg_root, pkg_key, pkg_action = x
14250                         if pkg_root not in trees:
14251                                 # Current $ROOT setting differs,
14252                                 # so the list must be stale.
14253                                 mergelist = None
14254                                 break
14255                 if not mergelist:
14256                         del mtimedb[k]
14257                         continue
14258                 resume_opts = resume_data.get("myopts")
14259                 if not isinstance(resume_opts, (dict, list)):
14260                         del mtimedb[k]
14261                         continue
14262                 favorites = resume_data.get("favorites")
14263                 if not isinstance(favorites, list):
14264                         del mtimedb[k]
14265                         continue
14266
14267         resume = False
14268         if "--resume" in myopts and \
14269                 ("resume" in mtimedb or
14270                 "resume_backup" in mtimedb):
14271                 resume = True
14272                 if "resume" not in mtimedb:
14273                         mtimedb["resume"] = mtimedb["resume_backup"]
14274                         del mtimedb["resume_backup"]
14275                         mtimedb.commit()
14276                 # "myopts" is a list for backward compatibility.
14277                 resume_opts = mtimedb["resume"].get("myopts", [])
14278                 if isinstance(resume_opts, list):
14279                         resume_opts = dict((k,True) for k in resume_opts)
14280                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14281                         resume_opts.pop(opt, None)
14282                 myopts.update(resume_opts)
14283
14284                 if "--debug" in myopts:
14285                         writemsg_level("myopts %s\n" % (myopts,))
14286
14287                 # Adjust config according to options of the command being resumed.
14288                 for myroot in trees:
14289                         mysettings = trees[myroot]["vartree"].settings
14290                         mysettings.unlock()
14291                         adjust_config(myopts, mysettings)
14292                         mysettings.lock()
14293                         del myroot, mysettings
14294
14295         ldpath_mtimes = mtimedb["ldpath"]
14296         favorites=[]
14297         merge_count = 0
14298         buildpkgonly = "--buildpkgonly" in myopts
14299         pretend = "--pretend" in myopts
14300         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14301         ask = "--ask" in myopts
14302         nodeps = "--nodeps" in myopts
14303         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14304         tree = "--tree" in myopts
14305         if nodeps and tree:
14306                 tree = False
14307                 del myopts["--tree"]
14308                 portage.writemsg(colorize("WARN", " * ") + \
14309                         "--tree is broken with --nodeps. Disabling...\n")
14310         debug = "--debug" in myopts
14311         verbose = "--verbose" in myopts
14312         quiet = "--quiet" in myopts
14313         if pretend or fetchonly:
14314                 # make the mtimedb readonly
14315                 mtimedb.filename = None
14316         if '--digest' in myopts or 'digest' in settings.features:
14317                 if '--digest' in myopts:
14318                         msg = "The --digest option"
14319                 else:
14320                         msg = "The FEATURES=digest setting"
14321
14322                 msg += " can prevent corruption from being" + \
14323                         " noticed. The `repoman manifest` command is the preferred" + \
14324                         " way to generate manifests and it is capable of doing an" + \
14325                         " entire repository or category at once."
14326                 prefix = bad(" * ")
14327                 writemsg(prefix + "\n")
14328                 from textwrap import wrap
14329                 for line in wrap(msg, 72):
14330                         writemsg("%s%s\n" % (prefix, line))
14331                 writemsg(prefix + "\n")
14332
14333         if "--quiet" not in myopts and \
14334                 ("--pretend" in myopts or "--ask" in myopts or \
14335                 "--tree" in myopts or "--verbose" in myopts):
14336                 action = ""
14337                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14338                         action = "fetched"
14339                 elif "--buildpkgonly" in myopts:
14340                         action = "built"
14341                 else:
14342                         action = "merged"
14343                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14344                         print
14345                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14346                         print
14347                 else:
14348                         print
14349                         print darkgreen("These are the packages that would be %s, in order:") % action
14350                         print
14351
14352         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14353         if not show_spinner:
14354                 spinner.update = spinner.update_quiet
14355
14356         if resume:
14357                 favorites = mtimedb["resume"].get("favorites")
14358                 if not isinstance(favorites, list):
14359                         favorites = []
14360
14361                 if show_spinner:
14362                         print "Calculating dependencies  ",
14363                 myparams = create_depgraph_params(myopts, myaction)
14364
14365                 resume_data = mtimedb["resume"]
14366                 mergelist = resume_data["mergelist"]
14367                 if mergelist and "--skipfirst" in myopts:
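                              # --skipfirst removes only the first pending "merge" entry;
                              # any earlier non-merge entries (e.g. uninstall tasks) are
                              # left in place.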
14368                         for i, task in enumerate(mergelist):
14369                                 if isinstance(task, list) and \
14370                                         task and task[-1] == "merge":
14371                                         del mergelist[i]
14372                                         break
14373
14374                 success = False
14375                 mydepgraph = None
14376                 try:
14377                         success, mydepgraph, dropped_tasks = resume_depgraph(
14378                                 settings, trees, mtimedb, myopts, myparams, spinner)
14379                 except (portage.exception.PackageNotFound,
14380                         depgraph.UnsatisfiedResumeDep), e:
14381                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14382                                 mydepgraph = e.depgraph
14383                         if show_spinner:
14384                                 print
14385                         from textwrap import wrap
14386                         from portage.output import EOutput
14387                         out = EOutput()
14388
14389                         resume_data = mtimedb["resume"]
14390                         mergelist = resume_data.get("mergelist")
14391                         if not isinstance(mergelist, list):
14392                                 mergelist = []
14393                         if mergelist and (debug or (verbose and not quiet)):
14394                                 out.eerror("Invalid resume list:")
14395                                 out.eerror("")
14396                                 indent = "  "
14397                                 for task in mergelist:
14398                                         if isinstance(task, list):
14399                                                 out.eerror(indent + str(tuple(task)))
14400                                 out.eerror("")
14401
14402                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14403                                 out.eerror("One or more packages are either masked or " + \
14404                                         "have missing dependencies:")
14405                                 out.eerror("")
14406                                 indent = "  "
14407                                 for dep in e.value:
14408                                         if dep.atom is None:
14409                                                 out.eerror(indent + "Masked package:")
14410                                                 out.eerror(2 * indent + str(dep.parent))
14411                                                 out.eerror("")
14412                                         else:
14413                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14414                                                 out.eerror(2 * indent + str(dep.parent))
14415                                                 out.eerror("")
14416                                 msg = "The resume list contains packages " + \
14417                                         "that are either masked or have " + \
14418                                         "unsatisfied dependencies. " + \
14419                                         "Please restart/continue " + \
14420                                         "the operation manually, or use --skipfirst " + \
14421                                         "to skip the first package in the list and " + \
14422                                         "any other packages that may be " + \
14423                                         "masked or have missing dependencies."
14424                                 for line in wrap(msg, 72):
14425                                         out.eerror(line)
14426                         elif isinstance(e, portage.exception.PackageNotFound):
14427                                 out.eerror("An expected package is " + \
14428                                         "not available: %s" % str(e))
14429                                 out.eerror("")
14430                                 msg = "The resume list contains one or more " + \
14431                                         "packages that are no longer " + \
14432                                         "available. Please restart/continue " + \
14433                                         "the operation manually."
14434                                 for line in wrap(msg, 72):
14435                                         out.eerror(line)
14436                 else:
14437                         if show_spinner:
14438                                 print "\b\b... done!"
14439
14440                 if success:
14441                         if dropped_tasks:
14442                                 portage.writemsg("!!! One or more packages have been " + \
14443                                         "dropped due to\n" + \
14444                                         "!!! masking or unsatisfied dependencies:\n\n",
14445                                         noiselevel=-1)
14446                                 for task in dropped_tasks:
14447                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14448                                 portage.writemsg("\n", noiselevel=-1)
14449                         del dropped_tasks
14450                 else:
14451                         if mydepgraph is not None:
14452                                 mydepgraph.display_problems()
14453                         if not (ask or pretend):
14454                                 # delete the current list and also the backup
14455                                 # since it's probably stale too.
14456                                 for k in ("resume", "resume_backup"):
14457                                         mtimedb.pop(k, None)
14458                                 mtimedb.commit()
14459
14460                         return 1
14461         else:
14462                 if ("--resume" in myopts):
14463                         print darkgreen("emerge: It seems we have nothing to resume...")
14464                         return os.EX_OK
14465
14466                 myparams = create_depgraph_params(myopts, myaction)
14467                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14468                         print "Calculating dependencies  ",
14469                         sys.stdout.flush()
14470                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14471                 try:
14472                         retval, favorites = mydepgraph.select_files(myfiles)
14473                 except portage.exception.PackageNotFound, e:
14474                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14475                         return 1
14476                 except portage.exception.PackageSetNotFound, e:
14477                         root_config = trees[settings["ROOT"]]["root_config"]
14478                         display_missing_pkg_set(root_config, e.value)
14479                         return 1
14480                 if show_spinner:
14481                         print "\b\b... done!"
14482                 if not retval:
14483                         mydepgraph.display_problems()
14484                         return 1
14485
14486         if "--pretend" not in myopts and \
14487                 ("--ask" in myopts or "--tree" in myopts or \
14488                 "--verbose" in myopts) and \
14489                 not ("--quiet" in myopts and "--ask" not in myopts):
14490                 if "--resume" in myopts:
14491                         mymergelist = mydepgraph.altlist()
14492                         if len(mymergelist) == 0:
14493                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14494                                 return os.EX_OK
14495                         favorites = mtimedb["resume"]["favorites"]
14496                         retval = mydepgraph.display(
14497                                 mydepgraph.altlist(reversed=tree),
14498                                 favorites=favorites)
14499                         mydepgraph.display_problems()
14500                         if retval != os.EX_OK:
14501                                 return retval
14502                         prompt="Would you like to resume merging these packages?"
14503                 else:
14504                         retval = mydepgraph.display(
14505                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14506                                 favorites=favorites)
14507                         mydepgraph.display_problems()
14508                         if retval != os.EX_OK:
14509                                 return retval
14510                         mergecount=0
14511                         for x in mydepgraph.altlist():
14512                                 if isinstance(x, Package) and x.operation == "merge":
14513                                         mergecount += 1
14514
14515                         if mergecount==0:
14516                                 sets = trees[settings["ROOT"]]["root_config"].sets
14517                                 world_candidates = None
14518                                 if "--noreplace" in myopts and \
14519                                         not oneshot and favorites:
14520                                         # Sets that are not world candidates are filtered
14521                                         # out here since the favorites list needs to be
14522                                         # complete for depgraph.loadResumeCommand() to
14523                                         # operate correctly.
14524                                         world_candidates = [x for x in favorites \
14525                                                 if not (x.startswith(SETPREFIX) and \
14526                                                 not sets[x[1:]].world_candidate)]
14527                                 if "--noreplace" in myopts and \
14528                                         not oneshot and world_candidates:
14529                                         print
14530                                         for x in world_candidates:
14531                                                 print " %s %s" % (good("*"), x)
14532                                         prompt="Would you like to add these packages to your world favorites?"
14533                                 elif settings["AUTOCLEAN"] == "yes":
14534                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14535                                 else:
14536                                         print
14537                                         print "Nothing to merge; quitting."
14538                                         print
14539                                         return os.EX_OK
14540                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14541                                 prompt="Would you like to fetch the source files for these packages?"
14542                         else:
14543                                 prompt="Would you like to merge these packages?"
14544                 print
14545                 if "--ask" in myopts and userquery(prompt) == "No":
14546                         print
14547                         print "Quitting."
14548                         print
14549                         return os.EX_OK
14550                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14551                 myopts.pop("--ask", None)
14552
14553         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14554                 if ("--resume" in myopts):
14555                         mymergelist = mydepgraph.altlist()
14556                         if len(mymergelist) == 0:
14557                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14558                                 return os.EX_OK
14559                         favorites = mtimedb["resume"]["favorites"]
14560                         retval = mydepgraph.display(
14561                                 mydepgraph.altlist(reversed=tree),
14562                                 favorites=favorites)
14563                         mydepgraph.display_problems()
14564                         if retval != os.EX_OK:
14565                                 return retval
14566                 else:
14567                         retval = mydepgraph.display(
14568                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14569                                 favorites=favorites)
14570                         mydepgraph.display_problems()
14571                         if retval != os.EX_OK:
14572                                 return retval
14573                         if "--buildpkgonly" in myopts:
14574                                 graph_copy = mydepgraph.digraph.clone()
14575                                 removed_nodes = set()
14576                                 for node in graph_copy:
14577                                         if not isinstance(node, Package) or \
14578                                                 node.operation == "nomerge":
14579                                                 removed_nodes.add(node)
14580                                 graph_copy.difference_update(removed_nodes)
14581                                 if not graph_copy.hasallzeros(ignore_priority = \
14582                                         DepPrioritySatisfiedRange.ignore_medium):
14583                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14584                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14585                                         return 1
14586         else:
14587                 if "--buildpkgonly" in myopts:
14588                         graph_copy = mydepgraph.digraph.clone()
14589                         removed_nodes = set()
14590                         for node in graph_copy:
14591                                 if not isinstance(node, Package) or \
14592                                         node.operation == "nomerge":
14593                                         removed_nodes.add(node)
14594                         graph_copy.difference_update(removed_nodes)
14595                         if not graph_copy.hasallzeros(ignore_priority = \
14596                                 DepPrioritySatisfiedRange.ignore_medium):
14597                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14598                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14599                                 return 1
14600
14601                 if ("--resume" in myopts):
14602                         favorites=mtimedb["resume"]["favorites"]
14603                         mymergelist = mydepgraph.altlist()
14604                         mydepgraph.break_refs(mymergelist)
14605                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14606                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14607                         del mydepgraph, mymergelist
14608                         clear_caches(trees)
14609
14610                         retval = mergetask.merge()
14611                         merge_count = mergetask.curval
14612                 else:
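                              # Preserve a non-trivial resume list from an earlier,
                              # interrupted run as "resume_backup" before it is
                              # overwritten below.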
14613                         if "resume" in mtimedb and \
14614                         "mergelist" in mtimedb["resume"] and \
14615                         len(mtimedb["resume"]["mergelist"]) > 1:
14616                                 mtimedb["resume_backup"] = mtimedb["resume"]
14617                                 del mtimedb["resume"]
14618                                 mtimedb.commit()
14619                         mtimedb["resume"]={}
14620                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14621                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14622                         # a list type for options.
14623                         mtimedb["resume"]["myopts"] = myopts.copy()
14624
14625                         # Convert Atom instances to plain str.
14626                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14627
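                              # Build the final merge list, then drop references to the
                              # depgraph and clear the dbapi caches so that the memory
                              # they hold can be reclaimed before the merge phase starts.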
14628                         pkglist = mydepgraph.altlist()
14629                         mydepgraph.saveNomergeFavorites()
14630                         mydepgraph.break_refs(pkglist)
14631                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14632                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14633                         del mydepgraph, pkglist
14634                         clear_caches(trees)
14635
14636                         retval = mergetask.merge()
14637                         merge_count = mergetask.curval
14638
14639                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14640                         if "yes" == settings.get("AUTOCLEAN"):
14641                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14642                                 unmerge(trees[settings["ROOT"]]["root_config"],
14643                                         myopts, "clean", [],
14644                                         ldpath_mtimes, autoclean=1)
14645                         else:
14646                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14647                                         + " AUTOCLEAN is disabled.  This can cause serious"
14648                                         + " problems due to overlapping packages.\n")
14649                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14650
14651                 return retval
14652
14653 def multiple_actions(action1, action2):
14654         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14655         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14656         sys.exit(1)
14657
14658 def insert_optional_args(args):
14659         """
14660         Parse optional arguments and insert a value if one has
14661         not been provided. This is done before feeding the args
14662         to the optparse parser since that parser does not support
14663         this feature natively.
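
              A rough illustration of the rewriting performed here (outputs
              are what the code below produces for these inputs):

                  ["-j4", "world"]      ->  ["--jobs", "4", "world"]
                  ["-j", "3", "world"]  ->  ["--jobs", "3", "world"]
                  ["--jobs", "world"]   ->  ["--jobs", "True", "world"]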
14664         """
14665
14666         new_args = []
14667         jobs_opts = ("-j", "--jobs")
14668         arg_stack = args[:]
14669         arg_stack.reverse()
14670         while arg_stack:
14671                 arg = arg_stack.pop()
14672
14673                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14674                 if not (short_job_opt or arg in jobs_opts):
14675                         new_args.append(arg)
14676                         continue
14677
14678                 # Normalize to "--jobs" followed by an explicit value,
14679                 # since optparse requires the option to take an argument.
14680
14681                 new_args.append("--jobs")
14682                 job_count = None
14683                 saved_opts = None
14684                 if short_job_opt and len(arg) > 2:
14685                         if arg[:2] == "-j":
14686                                 try:
14687                                         job_count = int(arg[2:])
14688                                 except ValueError:
14689                                         saved_opts = arg[2:]
14690                         else:
14691                                 job_count = "True"
14692                                 saved_opts = arg[1:].replace("j", "")
14693
14694                 if job_count is None and arg_stack:
14695                         try:
14696                                 job_count = int(arg_stack[-1])
14697                         except ValueError:
14698                                 pass
14699                         else:
14700                                 # Discard the job count from the stack
14701                                 # since we're consuming it here.
14702                                 arg_stack.pop()
14703
14704                 if job_count is None:
14705                         # unlimited number of jobs
14706                         new_args.append("True")
14707                 else:
14708                         new_args.append(str(job_count))
14709
14710                 if saved_opts is not None:
14711                         new_args.append("-" + saved_opts)
14712
14713         return new_args
14714
14715 def parse_opts(tmpcmdline, silent=False):
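              """
              Parse the emerge command line and return a (myaction, myopts,
              myfiles) tuple: the selected action (if any), a dict mapping
              enabled options to their values, and the remaining file, atom
              and set arguments.
              """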
14716         myaction=None
14717         myopts = {}
14718         myfiles=[]
14719
14720         global actions, options, shortmapping
14721
14722         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14723         argument_options = {
14724                 "--config-root": {
14725                         "help":"specify the location for portage configuration files",
14726                         "action":"store"
14727                 },
14728                 "--color": {
14729                         "help":"enable or disable color output",
14730                         "type":"choice",
14731                         "choices":("y", "n")
14732                 },
14733
14734                 "--jobs": {
14735
14736                         "help"   : "Specifies the number of packages to build " + \
14737                                 "simultaneously.",
14738
14739                         "action" : "store"
14740                 },
14741
14742                 "--load-average": {
14743
14744                         "help"   :"Specifies that no new builds should be started " + \
14745                                 "if there are other builds running and the load average " + \
14746                                 "is at least LOAD (a floating-point number).",
14747
14748                         "action" : "store"
14749                 },
14750
14751                 "--with-bdeps": {
14752                         "help":"include unnecessary build time dependencies",
14753                         "type":"choice",
14754                         "choices":("y", "n")
14755                 },
14756                 "--reinstall": {
14757                         "help":"specify conditions to trigger package reinstallation",
14758                         "type":"choice",
14759                         "choices":["changed-use"]
14760                 }
14761         }
14762
14763         from optparse import OptionParser
14764         parser = OptionParser()
14765         if parser.has_option("--help"):
14766                 parser.remove_option("--help")
14767
14768         for action_opt in actions:
14769                 parser.add_option("--" + action_opt, action="store_true",
14770                         dest=action_opt.replace("-", "_"), default=False)
14771         for myopt in options:
14772                 parser.add_option(myopt, action="store_true",
14773                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14774         for shortopt, longopt in shortmapping.iteritems():
14775                 parser.add_option("-" + shortopt, action="store_true",
14776                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14777         for myalias, myopt in longopt_aliases.iteritems():
14778                 parser.add_option(myalias, action="store_true",
14779                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14780
14781         for myopt, kwargs in argument_options.iteritems():
14782                 parser.add_option(myopt,
14783                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14784
14785         tmpcmdline = insert_optional_args(tmpcmdline)
14786
14787         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14788
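              # A bare --jobs (no count) reaches this point as the placeholder
              # string "True" inserted by insert_optional_args(), which means
              # an unlimited number of jobs.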
14789         if myoptions.jobs:
14790                 jobs = None
14791                 if myoptions.jobs == "True":
14792                         jobs = True
14793                 else:
14794                         try:
14795                                 jobs = int(myoptions.jobs)
14796                         except ValueError:
14797                                 jobs = -1
14798
14799                 if jobs is not True and \
14800                         jobs < 1:
14801                         jobs = None
14802                         if not silent:
14803                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14804                                         (myoptions.jobs,), noiselevel=-1)
14805
14806                 myoptions.jobs = jobs
14807
14808         if myoptions.load_average:
14809                 try:
14810                         load_average = float(myoptions.load_average)
14811                 except ValueError:
14812                         load_average = 0.0
14813
14814                 if load_average <= 0.0:
14815                         load_average = None
14816                         if not silent:
14817                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14818                                         (myoptions.load_average,), noiselevel=-1)
14819
14820                 myoptions.load_average = load_average
14821
14822         for myopt in options:
14823                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14824                 if v:
14825                         myopts[myopt] = True
14826
14827         for myopt in argument_options:
14828                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14829                 if v is not None:
14830                         myopts[myopt] = v
14831
14832         if myoptions.searchdesc:
14833                 myoptions.search = True
14834
14835         for action_opt in actions:
14836                 v = getattr(myoptions, action_opt.replace("-", "_"))
14837                 if v:
14838                         if myaction:
14839                                 multiple_actions(myaction, action_opt)
14840                                 sys.exit(1)
14841                         myaction = action_opt
14842
14843         myfiles += myargs
14844
14845         return myaction, myopts, myfiles
14846
14847 def validate_ebuild_environment(trees):
14848         for myroot in trees:
14849                 settings = trees[myroot]["vartree"].settings
14850                 settings.validate()
14851
14852 def clear_caches(trees):
14853         for d in trees.itervalues():
14854                 d["porttree"].dbapi.melt()
14855                 d["porttree"].dbapi._aux_cache.clear()
14856                 d["bintree"].dbapi._aux_cache.clear()
14857                 d["bintree"].dbapi._clear_cache()
14858                 d["vartree"].dbapi.linkmap._clear_cache()
14859         portage.dircache.clear()
14860         gc.collect()
14861
14862 def load_emerge_config(trees=None):
14863         kwargs = {}
14864         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14865                 v = os.environ.get(envvar, None)
14866                 if v and v.strip():
14867                         kwargs[k] = v
14868         trees = portage.create_trees(trees=trees, **kwargs)
14869
14870         for root, root_trees in trees.iteritems():
14871                 settings = root_trees["vartree"].settings
14872                 setconfig = load_default_config(settings, root_trees)
14873                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14874
14875         settings = trees["/"]["vartree"].settings
14876
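              # Prefer the settings of a non-"/" ROOT if one is configured
              # (e.g. when installing to a separate root); otherwise keep the
              # settings for "/".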
14877         for myroot in trees:
14878                 if myroot != "/":
14879                         settings = trees[myroot]["vartree"].settings
14880                         break
14881
14882         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14883         mtimedb = portage.MtimeDB(mtimedbfile)
14884
14885         return settings, trees, mtimedb
14886
14887 def adjust_config(myopts, settings):
14888         """Make emerge specific adjustments to the config."""
14889
14890         # To enhance usability, make some vars case insensitive by forcing them to
14891         # lower case.
14892         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14893                 if myvar in settings:
14894                         settings[myvar] = settings[myvar].lower()
14895                         settings.backup_changes(myvar)
14896         del myvar
14897
14898         # Kill noauto as it will break merges otherwise.
14899         if "noauto" in settings.features:
14900                 while "noauto" in settings.features:
14901                         settings.features.remove("noauto")
14902                 settings["FEATURES"] = " ".join(settings.features)
14903                 settings.backup_changes("FEATURES")
14904
14905         CLEAN_DELAY = 5
14906         try:
14907                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14908         except ValueError, e:
14909                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14910                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14911                         settings["CLEAN_DELAY"], noiselevel=-1)
14912         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14913         settings.backup_changes("CLEAN_DELAY")
14914
14915         EMERGE_WARNING_DELAY = 10
14916         try:
14917                 EMERGE_WARNING_DELAY = int(settings.get(
14918                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14919         except ValueError, e:
14920                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14921                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14922                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14923         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14924         settings.backup_changes("EMERGE_WARNING_DELAY")
14925
14926         if "--quiet" in myopts:
14927                 settings["PORTAGE_QUIET"]="1"
14928                 settings.backup_changes("PORTAGE_QUIET")
14929
14930         if "--verbose" in myopts:
14931                 settings["PORTAGE_VERBOSE"] = "1"
14932                 settings.backup_changes("PORTAGE_VERBOSE")
14933
14934         # Set so that configs will be merged regardless of remembered status
14935         if ("--noconfmem" in myopts):
14936                 settings["NOCONFMEM"]="1"
14937                 settings.backup_changes("NOCONFMEM")
14938
14939         # Set various debug markers... They should be merged somehow.
14940         PORTAGE_DEBUG = 0
14941         try:
14942                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14943                 if PORTAGE_DEBUG not in (0, 1):
14944                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14945                                 PORTAGE_DEBUG, noiselevel=-1)
14946                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14947                                 noiselevel=-1)
14948                         PORTAGE_DEBUG = 0
14949         except ValueError, e:
14950                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14951                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14952                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14953                 del e
14954         if "--debug" in myopts:
14955                 PORTAGE_DEBUG = 1
14956         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14957         settings.backup_changes("PORTAGE_DEBUG")
14958
14959         if settings.get("NOCOLOR") not in ("yes","true"):
14960                 portage.output.havecolor = 1
14961
14962         """The explicit --color < y | n > option overrides the NOCOLOR environment
14963         variable and stdout auto-detection."""
14964         if "--color" in myopts:
14965                 if "y" == myopts["--color"]:
14966                         portage.output.havecolor = 1
14967                         settings["NOCOLOR"] = "false"
14968                 else:
14969                         portage.output.havecolor = 0
14970                         settings["NOCOLOR"] = "true"
14971                 settings.backup_changes("NOCOLOR")
14972         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14973                 portage.output.havecolor = 0
14974                 settings["NOCOLOR"] = "true"
14975                 settings.backup_changes("NOCOLOR")
14976
14977 def apply_priorities(settings):
14978         ionice(settings)
14979         nice(settings)
14980
14981 def nice(settings):
14982         try:
14983                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14984         except (OSError, ValueError), e:
14985                 out = portage.output.EOutput()
14986                 out.eerror("Failed to change nice value to '%s'" % \
14987                         settings["PORTAGE_NICENESS"])
14988                 out.eerror("%s\n" % str(e))
14989
14990 def ionice(settings):
14991
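              # PORTAGE_IONICE_COMMAND is expected to be something like
              # "ionice -c 3 -p ${PID}" (see make.conf(5)); ${PID} is expanded
              # below to the pid of the current emerge process.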
14992         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14993         if ionice_cmd:
14994                 ionice_cmd = shlex.split(ionice_cmd)
14995         if not ionice_cmd:
14996                 return
14997
14998         from portage.util import varexpand
14999         variables = {"PID" : str(os.getpid())}
15000         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15001
15002         try:
15003                 rval = portage.process.spawn(cmd, env=os.environ)
15004         except portage.exception.CommandNotFound:
15005                 # The ionice command is unavailable (e.g. not installed),
15006                 # so return silently.
15007                 return
15008
15009         if rval != os.EX_OK:
15010                 out = portage.output.EOutput()
15011                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15012                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15013
15014 def display_missing_pkg_set(root_config, set_name):
15015
15016         msg = []
15017         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15018                 "The following sets exist:") % \
15019                 colorize("INFORM", set_name))
15020         msg.append("")
15021
15022         for s in sorted(root_config.sets):
15023                 msg.append("    %s" % s)
15024         msg.append("")
15025
15026         writemsg_level("".join("%s\n" % l for l in msg),
15027                 level=logging.ERROR, noiselevel=-1)
15028
15029 def expand_set_arguments(myfiles, myaction, root_config):
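              """
              Expand SETPREFIX ("@") arguments in myfiles: apply any per-set
              options given in {...} to the set config, evaluate the simple /@
              (intersection), -@ (difference) and +@ (union) operators, and
              replace sets with their member atoms, except for the default
              action where they are left for the depgraph to expand itself.
              Returns a (newargs, retval) tuple.
              """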
15030         retval = os.EX_OK
15031         setconfig = root_config.setconfig
15032
15033         sets = setconfig.getSets()
15034
15035         # In order to know exactly which atoms/sets should be added to the
15036         # world file, the depgraph performs set expansion later. It will get
15037         # confused about where the atoms came from if it's not allowed to
15038         # expand them itself.
15039         do_not_expand = (None, )
15040         newargs = []
15041         for a in myfiles:
15042                 if a in ("system", "world"):
15043                         newargs.append(SETPREFIX+a)
15044                 else:
15045                         newargs.append(a)
15046         myfiles = newargs
15047         del newargs
15048         newargs = []
15049
15050         # separators for set arguments
15051         ARG_START = "{"
15052         ARG_END = "}"
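              # For example, a (hypothetical) argument "@someset{mode=full,force}"
              # would pass {"mode": "full", "force": "True"} to setconfig.update()
              # for "someset" and be reduced to plain "@someset" below.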
15053
15054         # WARNING: all operators must be of equal length
15055         IS_OPERATOR = "/@"
15056         DIFF_OPERATOR = "-@"
15057         UNION_OPERATOR = "+@"
15058
15059         for i in range(0, len(myfiles)):
15060                 if myfiles[i].startswith(SETPREFIX):
15061                         start = 0
15062                         end = 0
15063                         x = myfiles[i][len(SETPREFIX):]
15064                         newset = ""
15065                         while x:
15066                                 start = x.find(ARG_START)
15067                                 end = x.find(ARG_END)
15068                                 if start > 0 and start < end:
15069                                         namepart = x[:start]
15070                                         argpart = x[start+1:end]
15071
15072                                         # TODO: implement proper quoting
15073                                         args = argpart.split(",")
15074                                         options = {}
15075                                         for a in args:
15076                                                 if "=" in a:
15077                                                         k, v  = a.split("=", 1)
15078                                                         options[k] = v
15079                                                 else:
15080                                                         options[a] = "True"
15081                                         setconfig.update(namepart, options)
15082                                         newset += (x[:start-len(namepart)]+namepart)
15083                                         x = x[end+len(ARG_END):]
15084                                 else:
15085                                         newset += x
15086                                         x = ""
15087                         myfiles[i] = SETPREFIX+newset
15088
15089         sets = setconfig.getSets()
15090
15091         # display errors that occurred while loading the SetConfig instance
15092         for e in setconfig.errors:
15093                 print colorize("BAD", "Error during set creation: %s" % e)
15094
15095         # emerge relies on the existence of sets named "world" and "system"
15096         required_sets = ("world", "system")
15097         missing_sets = []
15098
15099         for s in required_sets:
15100                 if s not in sets:
15101                         missing_sets.append(s)
15102         if missing_sets:
15103                 if len(missing_sets) > 2:
15104                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15105                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15106                 elif len(missing_sets) == 2:
15107                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15108                 else:
15109                         missing_sets_str = '"%s"' % missing_sets[-1]
15110                 msg = ["emerge: incomplete set configuration, " + \
15111                         "missing set(s): %s" % missing_sets_str]
15112                 if sets:
15113                         msg.append("        sets defined: %s" % ", ".join(sets))
15114                 msg.append("        This usually means that '%s'" % \
15115                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15116                 msg.append("        is missing or corrupt.")
15117                 for line in msg:
15118                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15119                 return (None, 1)
15120         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15121
15122         for a in myfiles:
15123                 if a.startswith(SETPREFIX):
15124                         # support simple set operations (intersection, difference and union)
15125                         # on the commandline. Expressions are evaluated strictly left-to-right
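                              # e.g. "@world-@system+@someset" (someset being a
                              # hypothetical set name) evaluates as
                              # ((world - system) + someset).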
15126                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15127                                 expression = a[len(SETPREFIX):]
15128                                 expr_sets = []
15129                                 expr_ops = []
15130                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15131                                         is_pos = expression.rfind(IS_OPERATOR)
15132                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15133                                         union_pos = expression.rfind(UNION_OPERATOR)
15134                                         op_pos = max(is_pos, diff_pos, union_pos)
15135                                         s1 = expression[:op_pos]
15136                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15137                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15138                                         if s2 not in sets:
15139                                                 display_missing_pkg_set(root_config, s2)
15140                                                 return (None, 1)
15141                                         expr_sets.insert(0, s2)
15142                                         expr_ops.insert(0, op)
15143                                         expression = s1
15144                                 if expression not in sets:
15145                                         display_missing_pkg_set(root_config, expression)
15146                                         return (None, 1)
15147                                 expr_sets.insert(0, expression)
15148                                 result = set(setconfig.getSetAtoms(expression))
15149                                 for i in range(0, len(expr_ops)):
15150                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15151                                         if expr_ops[i] == IS_OPERATOR:
15152                                                 result.intersection_update(s2)
15153                                         elif expr_ops[i] == DIFF_OPERATOR:
15154                                                 result.difference_update(s2)
15155                                         elif expr_ops[i] == UNION_OPERATOR:
15156                                                 result.update(s2)
15157                                         else:
15158                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15159                                 newargs.extend(result)
15160                         else:
15161                                 s = a[len(SETPREFIX):]
15162                                 if s not in sets:
15163                                         display_missing_pkg_set(root_config, s)
15164                                         return (None, 1)
15165                                 setconfig.active.append(s)
15166                                 try:
15167                                         set_atoms = setconfig.getSetAtoms(s)
15168                                 except portage.exception.PackageSetNotFound, e:
15169                                         writemsg_level(("emerge: the given set '%s' " + \
15170                                                 "contains a non-existent set named '%s'.\n") % \
15171                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15172                                         return (None, 1)
15173                                 if myaction in unmerge_actions and \
15174                                                 not sets[s].supportsOperation("unmerge"):
15175                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15176                                                 "not support unmerge operations\n")
15177                                         retval = 1
15178                                 elif not set_atoms:
15179                                         print "emerge: '%s' is an empty set" % s
15180                                 elif myaction not in do_not_expand:
15181                                         newargs.extend(set_atoms)
15182                                 else:
15183                                         newargs.append(SETPREFIX+s)
15184                                 for e in sets[s].errors:
15185                                         print e
15186                 else:
15187                         newargs.append(a)
15188         return (newargs, retval)
15189
15190 def repo_name_check(trees):
15191         missing_repo_names = set()
15192         for root, root_trees in trees.iteritems():
15193                 if "porttree" in root_trees:
15194                         portdb = root_trees["porttree"].dbapi
15195                         missing_repo_names.update(portdb.porttrees)
15196                         repos = portdb.getRepositories()
15197                         for r in repos:
15198                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15199                         if portdb.porttree_root in missing_repo_names and \
15200                                 not os.path.exists(os.path.join(
15201                                 portdb.porttree_root, "profiles")):
15202                                 # This is normal if $PORTDIR happens to be empty,
15203                                 # so don't warn about it.
15204                                 missing_repo_names.remove(portdb.porttree_root)
15205
15206         if missing_repo_names:
15207                 msg = []
15208                 msg.append("WARNING: One or more repositories " + \
15209                         "have missing repo_name entries:")
15210                 msg.append("")
15211                 for p in missing_repo_names:
15212                         msg.append("\t%s/profiles/repo_name" % (p,))
15213                 msg.append("")
15214                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15215                         "should be a plain text file containing a unique " + \
15216                         "name for the repository on the first line.", 70))
15217                 writemsg_level("".join("%s\n" % l for l in msg),
15218                         level=logging.WARNING, noiselevel=-1)
15219
15220         return bool(missing_repo_names)
15221
15222 def config_protect_check(trees):
15223         for root, root_trees in trees.iteritems():
15224                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15225                         msg = "!!! CONFIG_PROTECT is empty"
15226                         if root != "/":
15227                                 msg += " for '%s'" % root
15228                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
15229
15230 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15231
15232         if "--quiet" in myopts:
15233                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15234                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15235                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15236                         print "    " + colorize("INFORM", cp)
15237                 return
15238
15239         s = search(root_config, spinner, "--searchdesc" in myopts,
15240                 "--quiet" not in myopts, "--usepkg" in myopts,
15241                 "--usepkgonly" in myopts)
15242         null_cp = portage.dep_getkey(insert_category_into_atom(
15243                 arg, "null"))
15244         cat, atom_pn = portage.catsplit(null_cp)
15245         s.searchkey = atom_pn
15246         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15247                 s.addCP(cp)
15248         s.output()
15249         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15250         print "!!! one of the above fully-qualified ebuild names instead.\n"
15251
15252 def profile_check(trees, myaction, myopts):
15253         if myaction in ("info", "sync"):
15254                 return os.EX_OK
15255         elif "--version" in myopts or "--help" in myopts:
15256                 return os.EX_OK
15257         for root, root_trees in trees.iteritems():
15258                 if root_trees["root_config"].settings.profiles:
15259                         continue
15260                 # generate some profile-related warning messages
15261                 validate_ebuild_environment(trees)
15262                 msg = "If you have just changed your profile configuration, you " + \
15263                         "should revert back to the previous configuration. Due to " + \
15264                         "your current profile being invalid, allowed actions are " + \
15265                         "limited to --help, --info, --sync, and --version."
15266                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15267                         level=logging.ERROR, noiselevel=-1)
15268                 return 1
15269         return os.EX_OK
15270
15271 def emerge_main():
15272         global portage  # NFC why this is necessary now - genone
15273         portage._disable_legacy_globals()
15274         # Disable color until we're sure that it should be enabled (after
15275         # EMERGE_DEFAULT_OPTS has been parsed).
15276         portage.output.havecolor = 0
15277         # This first pass is just for options that need to be known as early as
15278         # possible, such as --config-root.  They will be parsed again later,
15279         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15280         # the value of --config-root).
15281         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15282         if "--debug" in myopts:
15283                 os.environ["PORTAGE_DEBUG"] = "1"
15284         if "--config-root" in myopts:
15285                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15286
15287         # Portage needs to ensure a sane umask for the files it creates.
15288         os.umask(022)
15289         settings, trees, mtimedb = load_emerge_config()
15290         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15291         rval = profile_check(trees, myaction, myopts)
15292         if rval != os.EX_OK:
15293                 return rval
15294
15295         if portage._global_updates(trees, mtimedb["updates"]):
15296                 mtimedb.commit()
15297                 # Reload the whole config from scratch.
15298                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15299                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15300
15301         xterm_titles = "notitles" not in settings.features
15302
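              # Second pass over the command line: prepend EMERGE_DEFAULT_OPTS
              # from make.conf (unless --ignore-default-opts is given) and
              # re-parse.  Illustrative example (hypothetical make.conf value):
              #   EMERGE_DEFAULT_OPTS="--ask --verbose"
              #   `emerge foo` is then parsed as `emerge --ask --verbose foo`.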
15303         tmpcmdline = []
15304         if "--ignore-default-opts" not in myopts:
15305                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15306         tmpcmdline.extend(sys.argv[1:])
15307         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15308
15309         if "--digest" in myopts:
15310                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15311                 # Reload the whole config from scratch so that the portdbapi internal
15312                 # config is updated with new FEATURES.
15313                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15314                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15315
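              # Apply option-derived adjustments to each root's configuration;
              # the config must be unlocked for adjust_config() to modify it
              # and is locked again afterwards.  For merge/unmerge style
              # actions a hash of the vartree counter is also recorded in
              # PORTAGE_COUNTER_HASH.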
15316         for myroot in trees:
15317                 mysettings = trees[myroot]["vartree"].settings
15318                 mysettings.unlock()
15319                 adjust_config(myopts, mysettings)
15320                 if '--pretend' not in myopts and myaction in \
15321                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15322                         mysettings["PORTAGE_COUNTER_HASH"] = \
15323                                 trees[myroot]["vartree"].dbapi._counter_hash()
15324                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15325                 mysettings.lock()
15326                 del myroot, mysettings
15327
15328         apply_priorities(settings)
15329
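              # Create the status spinner; FEATURES=candy switches it to the
              # scrolling-message mode instead of the plain twirl.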
15330         spinner = stdout_spinner()
15331         if "candy" in settings.features:
15332                 spinner.update = spinner.update_scroll
15333
15334         if "--quiet" not in myopts:
15335                 portage.deprecated_profile_check(settings=settings)
15336                 repo_name_check(trees)
15337                 config_protect_check(trees)
15338
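              # Record any eclasses that overlays override so that a warning
              # can be printed below (suppressed by
              # PORTAGE_ECLASS_WARNING_ENABLE="0").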
15339         eclasses_overridden = {}
15340         for mytrees in trees.itervalues():
15341                 mydb = mytrees["porttree"].dbapi
15342                 # Freeze the portdbapi for performance (memoize all xmatch results).
15343                 mydb.freeze()
15344                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15345         del mytrees, mydb
15346
15347         if eclasses_overridden and \
15348                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15349                 prefix = bad(" * ")
15350                 if len(eclasses_overridden) == 1:
15351                         writemsg(prefix + "Overlay eclass overrides " + \
15352                                 "eclass from PORTDIR:\n", noiselevel=-1)
15353                 else:
15354                         writemsg(prefix + "Overlay eclasses override " + \
15355                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15356                 writemsg(prefix + "\n", noiselevel=-1)
15357                 for eclass_name in sorted(eclasses_overridden):
15358                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15359                                 (eclasses_overridden[eclass_name], eclass_name),
15360                                 noiselevel=-1)
15361                 writemsg(prefix + "\n", noiselevel=-1)
15362                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15363                         "because it will trigger invalidation of cached ebuild metadata " + \
15364                         "that is distributed with the portage tree. If you must " + \
15365                         "override eclasses from PORTDIR then you are advised to add " + \
15366                         "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15367                         "`emerge --regen` after each time that you run `emerge --sync`. " + \
15368                         "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15369                         "you would like to disable this warning."
15370                 from textwrap import wrap
15371                 for line in wrap(msg, 72):
15372                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15373
15374         if "moo" in myfiles:
15375                 print """
15376
15377   Larry loves Gentoo (""" + platform.system() + """)
15378
15379  _______________________
15380 < Have you mooed today? >
15381  -----------------------
15382         \   ^__^
15383          \  (oo)\_______
15384             (__)\       )\/\ 
15385                 ||----w |
15386                 ||     ||
15387
15388 """
15389
15390         for x in myfiles:
15391                 ext = os.path.splitext(x)[1]
15392                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15393                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15394                         break
15395
15396         root_config = trees[settings["ROOT"]]["root_config"]
15397         if myaction == "list-sets":
15398                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15399                 sys.stdout.flush()
15400                 return os.EX_OK
15401
15402         # only expand sets for actions taking package arguments
15403         oldargs = myfiles[:]
15404         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15405                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15406                 if retval != os.EX_OK:
15407                         return retval
15408
15409                 # Need to handle empty sets specially, otherwise emerge will react 
15410                 # with the help message for empty argument lists
15411                 if oldargs and not myfiles:
15412                         print "emerge: no targets left after set expansion"
15413                         return 0
15414
15415         if ("--tree" in myopts) and ("--columns" in myopts):
15416                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15417                 return 1
15418
15419         if ("--quiet" in myopts):
15420                 spinner.update = spinner.update_quiet
15421                 portage.util.noiselimit = -1
15422
15423         # Always create packages if FEATURES=buildpkg
15424         # Imply --buildpkg if --buildpkgonly
15425         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15426                 if "--buildpkg" not in myopts:
15427                         myopts["--buildpkg"] = True
15428
15429         # Always try to fetch binary packages if FEATURES=getbinpkg
15430         if ("getbinpkg" in settings.features):
15431                 myopts["--getbinpkg"] = True
15432
15433         if "--buildpkgonly" in myopts:
15434                 # --buildpkgonly will not merge anything, so
15435                 # it cancels all binary package options.
15436                 for opt in ("--getbinpkg", "--getbinpkgonly",
15437                         "--usepkg", "--usepkgonly"):
15438                         myopts.pop(opt, None)
15439
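              # --fetch-all-uri is a fetch-only mode, so it implies --fetchonly.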
15440         if "--fetch-all-uri" in myopts:
15441                 myopts["--fetchonly"] = True
15442
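              # --skipfirst only makes sense when resuming, so imply --resume.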
15443         if "--skipfirst" in myopts and "--resume" not in myopts:
15444                 myopts["--resume"] = True
15445
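              # Binary package option implications: --getbinpkgonly implies
              # --usepkgonly and --getbinpkg, and --getbinpkg implies --usepkg.
              # For example, `emerge -K foo` is treated like `emerge -K -k foo`
              # (see the -K note below).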
15446         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15447                 myopts["--usepkgonly"] = True
15448
15449         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15450                 myopts["--getbinpkg"] = True
15451
15452         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15453                 myopts["--usepkg"] = True
15454
15455         # Also allow -K to apply --usepkg/-k
15456         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15457                 myopts["--usepkg"] = True
15458
15459         # Allow -p to remove --ask
15460         if ("--pretend" in myopts) and ("--ask" in myopts):
15461                 print ">>> --pretend disables --ask... removing --ask from options."
15462                 del myopts["--ask"]
15463
15464         # forbid --ask when not in a terminal
15465         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15466         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15467                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15468                         noiselevel=-1)
15469                 return 1
15470
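              # PORTAGE_DEBUG=1 silences the spinner and enables debug output;
              # FEATURES=python-trace additionally turns on Python-level tracing.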
15471         if settings.get("PORTAGE_DEBUG", "") == "1":
15472                 spinner.update = spinner.update_quiet
15473                 portage.debug=1
15474                 if "python-trace" in settings.features:
15475                         import portage.debug
15476                         portage.debug.set_trace(True)
15477
15478         if "--quiet" not in myopts:
15479                 if not sys.stdout.isatty() or "--nospinner" in myopts:
15480                         spinner.update = spinner.update_basic
15481
15482         if myaction == 'version':
15483                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15484                         settings.profile_path, settings["CHOST"],
15485                         trees[settings["ROOT"]]["vartree"].dbapi)
15486                 return 0
15487         elif "--help" in myopts:
15488                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15489                 return 0
15490
15491         if "--debug" in myopts:
15492                 print "myaction", myaction
15493                 print "myopts", myopts
15494
15495         if not myaction and not myfiles and "--resume" not in myopts:
15496                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15497                 return 1
15498
15499         pretend = "--pretend" in myopts
15500         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15501         buildpkgonly = "--buildpkgonly" in myopts
15502
15503         # Check that the current user has sufficient privileges (root or portage group) for the requested action.
15504         if portage.secpass < 2:
15505                 # We've already allowed "--version" and "--help" above.
15506                 if "--pretend" not in myopts and myaction not in ("search","info"):
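                              # Superuser access is not needed for fetch-only
                              # runs, for --buildpkgonly with portage group
                              # access, for the metadata/regen actions, or for
                              # sync when PORTDIR is already writable.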
15507                         need_superuser = not \
15508                                 (fetchonly or \
15509                                 (buildpkgonly and secpass >= 1) or \
15510                                 myaction in ("metadata", "regen") or \
15511                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15512                         if portage.secpass < 1 or \
15513                                 need_superuser:
15514                                 if need_superuser:
15515                                         access_desc = "superuser"
15516                                 else:
15517                                         access_desc = "portage group"
15518                                 # Always show portage_group_warning() when only portage group
15519                                 # access is required but the user is not in the portage group.
15520                                 from portage.data import portage_group_warning
15521                                 if "--ask" in myopts:
15522                                         myopts["--pretend"] = True
15523                                         del myopts["--ask"]
15524                                         print ("%s access is required... " + \
15525                                                 "adding --pretend to options.\n") % access_desc
15526                                         if portage.secpass < 1 and not need_superuser:
15527                                                 portage_group_warning()
15528                                 else:
15529                                         sys.stderr.write(("emerge: %s access is " + \
15530                                                 "required.\n\n") % access_desc)
15531                                         if portage.secpass < 1 and not need_superuser:
15532                                                 portage_group_warning()
15533                                         return 1
15534
15535         disable_emergelog = False
15536         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15537                 if x in myopts:
15538                         disable_emergelog = True
15539                         break
15540         if myaction in ("search", "info"):
15541                 disable_emergelog = True
15542         if disable_emergelog:
15543                 """ Disable emergelog for everything except build or unmerge
15544                 operations.  This helps minimize parallel emerge.log entries that can
15545                 confuse log parsers.  We especially want it disabled during
15546                 parallel-fetch, which uses --resume --fetchonly."""
15547                 global emergelog
15548                 def emergelog(*pargs, **kargs):
15549                         pass
15550
15551         if "--pretend" not in myopts:
15552                 emergelog(xterm_titles, "Started emerge on: "+\
15553                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15554                 myelogstr=""
15555                 if myopts:
15556                         myelogstr=" ".join(myopts)
15557                 if myaction:
15558                         myelogstr+=" "+myaction
15559                 if myfiles:
15560                         myelogstr += " " + " ".join(oldargs)
15561                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15562         del oldargs
15563
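              # Install SIGINT/SIGTERM handlers that log the signal and exit
              # with status 100 + signum; the atexit hook below writes the
              # final emergelog entry and resets the xterm title.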
15564         def emergeexitsig(signum, frame):
15565                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15566                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15567                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15568                 sys.exit(100+signum)
15569         signal.signal(signal.SIGINT, emergeexitsig)
15570         signal.signal(signal.SIGTERM, emergeexitsig)
15571
15572         def emergeexit():
15573                 """This gets our final log message in before we quit."""
15574                 if "--pretend" not in myopts:
15575                         emergelog(xterm_titles, " *** terminating.")
15576                 if "notitles" not in settings.features:
15577                         xtermTitleReset()
15578         portage.atexit_register(emergeexit)
15579
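              # Dispatch on the requested action: sync, metadata, regen,
              # config, search, clean/unmerge/prune, and depclean/info are
              # handled explicitly; anything else falls through to the
              # build action at the end of this function.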
15580         if myaction in ("config", "metadata", "regen", "sync"):
15581                 if "--pretend" in myopts:
15582                         sys.stderr.write(("emerge: The '%s' action does " + \
15583                                 "not support '--pretend'.\n") % myaction)
15584                         return 1
15585
15586         if "sync" == myaction:
15587                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15588         elif "metadata" == myaction:
15589                 action_metadata(settings, portdb, myopts)
15590         elif myaction=="regen":
15591                 validate_ebuild_environment(trees)
15592                 return action_regen(settings, portdb, myopts.get("--jobs"),
15593                         myopts.get("--load-average"))
15594         # CONFIG action
15595         elif "config"==myaction:
15596                 validate_ebuild_environment(trees)
15597                 action_config(settings, trees, myopts, myfiles)
15598
15599         # SEARCH action
15600         elif "search"==myaction:
15601                 validate_ebuild_environment(trees)
15602                 action_search(trees[settings["ROOT"]]["root_config"],
15603                         myopts, myfiles, spinner)
15604         elif myaction in ("clean", "unmerge") or \
15605                 (myaction == "prune" and "--nodeps" in myopts):
15606                 validate_ebuild_environment(trees)
15607
15608                 # Ensure atoms are valid before calling unmerge().
15609                 # For backward compat, leading '=' is not required.
15610                 for x in myfiles:
15611                         if is_valid_package_atom(x) or \
15612                                 is_valid_package_atom("=" + x):
15613                                 continue
15614                         msg = []
15615                         msg.append("'%s' is not a valid package atom." % (x,))
15616                         msg.append("Please check ebuild(5) for full details.")
15617                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15618                                 level=logging.ERROR, noiselevel=-1)
15619                         return 1
15620
15621                 # When given a list of atoms, unmerge
15622                 # them in the order given.
15623                 ordered = myaction == "unmerge"
15624                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15625                         mtimedb["ldpath"], ordered=ordered):
15626                         if not (buildpkgonly or fetchonly or pretend):
15627                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15628
15629         elif myaction in ("depclean", "info", "prune"):
15630
15631                 # Ensure atoms are valid before passing them to action_info() or action_depclean().
15632                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15633                 valid_atoms = []
15634                 for x in myfiles:
15635                         if is_valid_package_atom(x):
15636                                 try:
15637                                         valid_atoms.append(
15638                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15639                                 except portage.exception.AmbiguousPackageName, e:
15640                                         msg = "The short ebuild name \"" + x + \
15641                                                 "\" is ambiguous.  Please specify " + \
15642                                                 "one of the following " + \
15643                                                 "fully-qualified ebuild names instead:"
15644                                         for line in textwrap.wrap(msg, 70):
15645                                                 writemsg_level("!!! %s\n" % (line,),
15646                                                         level=logging.ERROR, noiselevel=-1)
15647                                         for i in e[0]:
15648                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15649                                                         level=logging.ERROR, noiselevel=-1)
15650                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15651                                         return 1
15652                                 continue
15653                         msg = []
15654                         msg.append("'%s' is not a valid package atom." % (x,))
15655                         msg.append("Please check ebuild(5) for full details.")
15656                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15657                                 level=logging.ERROR, noiselevel=-1)
15658                         return 1
15659
15660                 if myaction == "info":
15661                         return action_info(settings, trees, myopts, valid_atoms)
15662
15663                 validate_ebuild_environment(trees)
15664                 action_depclean(settings, trees, mtimedb["ldpath"],
15665                         myopts, myaction, valid_atoms, spinner)
15666                 if not (buildpkgonly or fetchonly or pretend):
15667                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15668         # "update", "system", or just process files:
15669         else:
15670                 validate_ebuild_environment(trees)
15671                 if "--pretend" not in myopts:
15672                         display_news_notification(root_config, myopts)
15673                 retval = action_build(settings, trees, mtimedb,
15674                         myopts, myaction, myfiles, spinner)
15675                 root_config = trees[settings["ROOT"]]["root_config"]
15676                 post_emerge(root_config, myopts, mtimedb, retval)
15677
15678                 return retval