Make die() to show a message indicating the repository that an ebuild came
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59 from UserDict import DictMixin
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         import cStringIO as StringIO
68 except ImportError:
69         import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",      "--version"
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if type(mysize) not in [types.IntType,types.LongType]:
282                 return str(mysize)
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of if it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virual matches unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtuals
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 self.sets = self.setconfig.getSets()
774                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
775
776 def create_world_atom(pkg, args_set, root_config):
777         """Create a new atom for the world file if one does not exist.  If the
778         argument atom is precise enough to identify a specific slot then a slot
779         atom will be returned. Atoms that are in the system set may also be stored
780         in world since system atoms can only match one slot while world atoms can
781         be greedy with respect to slots.  Unslotted system packages will not be
782         stored in world."""
783
784         arg_atom = args_set.findAtomForPackage(pkg)
785         if not arg_atom:
786                 return None
787         cp = portage.dep_getkey(arg_atom)
788         new_world_atom = cp
789         sets = root_config.sets
790         portdb = root_config.trees["porttree"].dbapi
791         vardb = root_config.trees["vartree"].dbapi
792         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793                 for cpv in portdb.match(cp))
794         slotted = len(available_slots) > 1 or \
795                 (len(available_slots) == 1 and "0" not in available_slots)
796         if not slotted:
797                 # check the vdb in case this is multislot
798                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799                         for cpv in vardb.match(cp))
800                 slotted = len(available_slots) > 1 or \
801                         (len(available_slots) == 1 and "0" not in available_slots)
802         if slotted and arg_atom != cp:
803                 # If the user gave a specific atom, store it as a
804                 # slot atom in the world file.
805                 slot_atom = pkg.slot_atom
806
807                 # For USE=multislot, there are a couple of cases to
808                 # handle here:
809                 #
810                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811                 #    unknown value, so just record an unslotted atom.
812                 #
813                 # 2) SLOT comes from an installed package and there is no
814                 #    matching SLOT in the portage tree.
815                 #
816                 # Make sure that the slot atom is available in either the
817                 # portdb or the vardb, since otherwise the user certainly
818                 # doesn't want the SLOT atom recorded in the world file
819                 # (case 1 above).  If it's only available in the vardb,
820                 # the user may be trying to prevent a USE=multislot
821                 # package from being removed by --depclean (case 2 above).
822
823                 mydb = portdb
824                 if not portdb.match(slot_atom):
825                         # SLOT seems to come from an installed multislot package
826                         mydb = vardb
827                 # If there is no installed package matching the SLOT atom,
828                 # it probably changed SLOT spontaneously due to USE=multislot,
829                 # so just record an unslotted atom.
830                 if vardb.match(slot_atom):
831                         # Now verify that the argument is precise
832                         # enough to identify a specific slot.
833                         matches = mydb.match(arg_atom)
834                         matched_slots = set()
835                         for cpv in matches:
836                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837                         if len(matched_slots) == 1:
838                                 new_world_atom = slot_atom
839
840         if new_world_atom == sets["world"].findAtomForPackage(pkg):
841                 # Both atoms would be identical, so there's nothing to add.
842                 return None
843         if not slotted:
844                 # Unlike world atoms, system atoms are not greedy for slots, so they
845                 # can't be safely excluded from world if they are slotted.
846                 system_atom = sets["system"].findAtomForPackage(pkg)
847                 if system_atom:
848                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
849                                 return None
850                         # System virtuals aren't safe to exclude from world since they can
851                         # match multiple old-style virtuals but only one of them will be
852                         # pulled in by update or depclean.
853                         providers = portdb.mysettings.getvirtuals().get(
854                                 portage.dep_getkey(system_atom))
855                         if providers and len(providers) == 1 and providers[0] == cp:
856                                 return None
857         return new_world_atom
858
859 def filter_iuse_defaults(iuse):
860         for flag in iuse:
861                 if flag.startswith("+") or flag.startswith("-"):
862                         yield flag[1:]
863                 else:
864                         yield flag
865
866 class SlotObject(object):
867         __slots__ = ("__weakref__",)
868
869         def __init__(self, **kwargs):
870                 classes = [self.__class__]
871                 while classes:
872                         c = classes.pop()
873                         if c is SlotObject:
874                                 continue
875                         classes.extend(c.__bases__)
876                         slots = getattr(c, "__slots__", None)
877                         if not slots:
878                                 continue
879                         for myattr in slots:
880                                 myvalue = kwargs.get(myattr, None)
881                                 setattr(self, myattr, myvalue)
882
883         def copy(self):
884                 """
885                 Create a new instance and copy all attributes
886                 defined from __slots__ (including those from
887                 inherited classes).
888                 """
889                 obj = self.__class__()
890
891                 classes = [self.__class__]
892                 while classes:
893                         c = classes.pop()
894                         if c is SlotObject:
895                                 continue
896                         classes.extend(c.__bases__)
897                         slots = getattr(c, "__slots__", None)
898                         if not slots:
899                                 continue
900                         for myattr in slots:
901                                 setattr(obj, myattr, getattr(self, myattr))
902
903                 return obj
904
905 class AbstractDepPriority(SlotObject):
906         __slots__ = ("buildtime", "runtime", "runtime_post")
907
908         def __lt__(self, other):
909                 return self.__int__() < other
910
911         def __le__(self, other):
912                 return self.__int__() <= other
913
914         def __eq__(self, other):
915                 return self.__int__() == other
916
917         def __ne__(self, other):
918                 return self.__int__() != other
919
920         def __gt__(self, other):
921                 return self.__int__() > other
922
923         def __ge__(self, other):
924                 return self.__int__() >= other
925
926         def copy(self):
927                 import copy
928                 return copy.copy(self)
929
930 class DepPriority(AbstractDepPriority):
931         """
932                 This class generates an integer priority level based of various
933                 attributes of the dependency relationship.  Attributes can be assigned
934                 at any time and the new integer value will be generated on calls to the
935                 __int__() method.  Rich comparison operators are supported.
936
937                 The boolean attributes that affect the integer value are "satisfied",
938                 "buildtime", "runtime", and "system".  Various combinations of
939                 attributes lead to the following priority levels:
940
941                 Combination of properties           Priority  Category
942
943                 not satisfied and buildtime            0       HARD
944                 not satisfied and runtime             -1       MEDIUM
945                 not satisfied and runtime_post        -2       MEDIUM_SOFT
946                 satisfied and buildtime and rebuild   -3       SOFT
947                 satisfied and buildtime               -4       SOFT
948                 satisfied and runtime                 -5       SOFT
949                 satisfied and runtime_post            -6       SOFT
950                 (none of the above)                   -6       SOFT
951
952                 Several integer constants are defined for categorization of priority
953                 levels:
954
955                 MEDIUM   The upper boundary for medium dependencies.
956                 MEDIUM_SOFT   The upper boundary for medium-soft dependencies.
957                 SOFT     The upper boundary for soft dependencies.
958                 MIN      The lower boundary for soft dependencies.
959         """
960         __slots__ = ("satisfied", "rebuild")
961         MEDIUM = -1
962         MEDIUM_SOFT = -2
963         SOFT   = -3
964         MIN    = -6
965
966         def __int__(self):
967                 if not self.satisfied:
968                         if self.buildtime:
969                                 return 0
970                         if self.runtime:
971                                 return -1
972                         if self.runtime_post:
973                                 return -2
974                 if self.buildtime:
975                         if self.rebuild:
976                                 return -3
977                         return -4
978                 if self.runtime:
979                         return -5
980                 if self.runtime_post:
981                         return -6
982                 return -6
983
984         def __str__(self):
985                 myvalue = self.__int__()
986                 if myvalue > self.MEDIUM:
987                         return "hard"
988                 if myvalue > self.MEDIUM_SOFT:
989                         return "medium"
990                 if myvalue > self.SOFT:
991                         return "medium-soft"
992                 return "soft"
993
994 class BlockerDepPriority(DepPriority):
995         __slots__ = ()
996         def __int__(self):
997                 return 0
998
999 BlockerDepPriority.instance = BlockerDepPriority()
1000
1001 class UnmergeDepPriority(AbstractDepPriority):
1002         __slots__ = ("satisfied",)
1003         """
1004         Combination of properties           Priority  Category
1005
1006         runtime                                0       HARD
1007         runtime_post                          -1       HARD
1008         buildtime                             -2       SOFT
1009         (none of the above)                   -2       SOFT
1010         """
1011
1012         MAX    =  0
1013         SOFT   = -2
1014         MIN    = -2
1015
1016         def __int__(self):
1017                 if self.runtime:
1018                         return 0
1019                 if self.runtime_post:
1020                         return -1
1021                 if self.buildtime:
1022                         return -2
1023                 return -2
1024
1025         def __str__(self):
1026                 myvalue = self.__int__()
1027                 if myvalue > self.SOFT:
1028                         return "hard"
1029                 return "soft"
1030
1031 class FakeVartree(portage.vartree):
1032         """This is implements an in-memory copy of a vartree instance that provides
1033         all the interfaces required for use by the depgraph.  The vardb is locked
1034         during the constructor call just long enough to read a copy of the
1035         installed package information.  This allows the depgraph to do it's
1036         dependency calculations without holding a lock on the vardb.  It also
1037         allows things like vardb global updates to be done in memory so that the
1038         user doesn't necessarily need write access to the vardb in cases where
1039         global updates are necessary (updates are performed when necessary if there
1040         is not a matching ebuild in the tree)."""
1041         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1042                 self._root_config = root_config
1043                 if pkg_cache is None:
1044                         pkg_cache = {}
1045                 real_vartree = root_config.trees["vartree"]
1046                 portdb = root_config.trees["porttree"].dbapi
1047                 self.root = real_vartree.root
1048                 self.settings = real_vartree.settings
1049                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050                 if "_mtime_" not in mykeys:
1051                         mykeys.append("_mtime_")
1052                 self._db_keys = mykeys
1053                 self._pkg_cache = pkg_cache
1054                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1056                 try:
1057                         # At least the parent needs to exist for the lock file.
1058                         portage.util.ensure_dirs(vdb_path)
1059                 except portage.exception.PortageException:
1060                         pass
1061                 vdb_lock = None
1062                 try:
1063                         if acquire_lock and os.access(vdb_path, os.W_OK):
1064                                 vdb_lock = portage.locks.lockdir(vdb_path)
1065                         real_dbapi = real_vartree.dbapi
1066                         slot_counters = {}
1067                         for cpv in real_dbapi.cpv_all():
1068                                 cache_key = ("installed", self.root, cpv, "nomerge")
1069                                 pkg = self._pkg_cache.get(cache_key)
1070                                 if pkg is not None:
1071                                         metadata = pkg.metadata
1072                                 else:
1073                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074                                 myslot = metadata["SLOT"]
1075                                 mycp = portage.dep_getkey(cpv)
1076                                 myslot_atom = "%s:%s" % (mycp, myslot)
1077                                 try:
1078                                         mycounter = long(metadata["COUNTER"])
1079                                 except ValueError:
1080                                         mycounter = 0
1081                                         metadata["COUNTER"] = str(mycounter)
1082                                 other_counter = slot_counters.get(myslot_atom, None)
1083                                 if other_counter is not None:
1084                                         if other_counter > mycounter:
1085                                                 continue
1086                                 slot_counters[myslot_atom] = mycounter
1087                                 if pkg is None:
1088                                         pkg = Package(built=True, cpv=cpv,
1089                                                 installed=True, metadata=metadata,
1090                                                 root_config=root_config, type_name="installed")
1091                                 self._pkg_cache[pkg] = pkg
1092                                 self.dbapi.cpv_inject(pkg)
1093                         real_dbapi.flush_cache()
1094                 finally:
1095                         if vdb_lock:
1096                                 portage.locks.unlockdir(vdb_lock)
1097                 # Populate the old-style virtuals using the cached values.
1098                 if not self.settings.treeVirtuals:
1099                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100                                 portage.getCPFromCPV, self.get_all_provides())
1101
1102                 # Intialize variables needed for lazy cache pulls of the live ebuild
1103                 # metadata.  This ensures that the vardb lock is released ASAP, without
1104                 # being delayed in case cache generation is triggered.
1105                 self._aux_get = self.dbapi.aux_get
1106                 self.dbapi.aux_get = self._aux_get_wrapper
1107                 self._match = self.dbapi.match
1108                 self.dbapi.match = self._match_wrapper
1109                 self._aux_get_history = set()
1110                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111                 self._portdb = portdb
1112                 self._global_updates = None
1113
1114         def _match_wrapper(self, cpv, use_cache=1):
1115                 """
1116                 Make sure the metadata in Package instances gets updated for any
1117                 cpv that is returned from a match() call, since the metadata can
1118                 be accessed directly from the Package instance instead of via
1119                 aux_get().
1120                 """
1121                 matches = self._match(cpv, use_cache=use_cache)
1122                 for cpv in matches:
1123                         if cpv in self._aux_get_history:
1124                                 continue
1125                         self._aux_get_wrapper(cpv, [])
1126                 return matches
1127
1128         def _aux_get_wrapper(self, pkg, wants):
1129                 if pkg in self._aux_get_history:
1130                         return self._aux_get(pkg, wants)
1131                 self._aux_get_history.add(pkg)
1132                 try:
1133                         # Use the live ebuild metadata if possible.
1134                         live_metadata = dict(izip(self._portdb_keys,
1135                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1136                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1137                                 raise KeyError(pkg)
1138                         self.dbapi.aux_update(pkg, live_metadata)
1139                 except (KeyError, portage.exception.PortageException):
1140                         if self._global_updates is None:
1141                                 self._global_updates = \
1142                                         grab_global_updates(self._portdb.porttree_root)
1143                         perform_global_updates(
1144                                 pkg, self.dbapi, self._global_updates)
1145                 return self._aux_get(pkg, wants)
1146
1147         def sync(self, acquire_lock=1):
1148                 """
1149                 Call this method to synchronize state with the real vardb
1150                 after one or more packages may have been installed or
1151                 uninstalled.
1152                 """
1153                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1154                 try:
1155                         # At least the parent needs to exist for the lock file.
1156                         portage.util.ensure_dirs(vdb_path)
1157                 except portage.exception.PortageException:
1158                         pass
1159                 vdb_lock = None
1160                 try:
1161                         if acquire_lock and os.access(vdb_path, os.W_OK):
1162                                 vdb_lock = portage.locks.lockdir(vdb_path)
1163                         self._sync()
1164                 finally:
1165                         if vdb_lock:
1166                                 portage.locks.unlockdir(vdb_lock)
1167
1168         def _sync(self):
1169
1170                 real_vardb = self._root_config.trees["vartree"].dbapi
1171                 current_cpv_set = frozenset(real_vardb.cpv_all())
1172                 pkg_vardb = self.dbapi
1173                 aux_get_history = self._aux_get_history
1174
1175                 # Remove any packages that have been uninstalled.
1176                 for pkg in list(pkg_vardb):
1177                         if pkg.cpv not in current_cpv_set:
1178                                 pkg_vardb.cpv_remove(pkg)
1179                                 aux_get_history.discard(pkg.cpv)
1180
1181                 # Validate counters and timestamps.
1182                 slot_counters = {}
1183                 root = self.root
1184                 validation_keys = ["COUNTER", "_mtime_"]
1185                 for cpv in current_cpv_set:
1186
1187                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1188                         pkg = pkg_vardb.get(pkg_hash_key)
1189                         if pkg is not None:
1190                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1191                                 try:
1192                                         counter = long(counter)
1193                                 except ValueError:
1194                                         counter = 0
1195
1196                                 if counter != pkg.counter or \
1197                                         mtime != pkg.mtime:
1198                                         pkg_vardb.cpv_remove(pkg)
1199                                         aux_get_history.discard(pkg.cpv)
1200                                         pkg = None
1201
1202                         if pkg is None:
1203                                 pkg = self._pkg(cpv)
1204
1205                         other_counter = slot_counters.get(pkg.slot_atom)
1206                         if other_counter is not None:
1207                                 if other_counter > pkg.counter:
1208                                         continue
1209
1210                         slot_counters[pkg.slot_atom] = pkg.counter
1211                         pkg_vardb.cpv_inject(pkg)
1212
1213                 real_vardb.flush_cache()
1214
1215         def _pkg(self, cpv):
1216                 root_config = self._root_config
1217                 real_vardb = root_config.trees["vartree"].dbapi
1218                 pkg = Package(cpv=cpv, installed=True,
1219                         metadata=izip(self._db_keys,
1220                         real_vardb.aux_get(cpv, self._db_keys)),
1221                         root_config=root_config,
1222                         type_name="installed")
1223
1224                 try:
1225                         mycounter = long(pkg.metadata["COUNTER"])
1226                 except ValueError:
1227                         mycounter = 0
1228                         pkg.metadata["COUNTER"] = str(mycounter)
1229
1230                 return pkg
1231
1232 def grab_global_updates(portdir):
1233         from portage.update import grab_updates, parse_updates
1234         updpath = os.path.join(portdir, "profiles", "updates")
1235         try:
1236                 rawupdates = grab_updates(updpath)
1237         except portage.exception.DirectoryNotFound:
1238                 rawupdates = []
1239         upd_commands = []
1240         for mykey, mystat, mycontent in rawupdates:
1241                 commands, errors = parse_updates(mycontent)
1242                 upd_commands.extend(commands)
1243         return upd_commands
1244
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246         from portage.update import update_dbentries
1247         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249         updates = update_dbentries(mycommands, aux_dict)
1250         if updates:
1251                 mydb.aux_update(mycpv, updates)
1252
1253 def visible(pkgsettings, pkg):
1254         """
1255         Check if a package is visible. This can raise an InvalidDependString
1256         exception if LICENSE is invalid.
1257         TODO: optionally generate a list of masking reasons
1258         @rtype: Boolean
1259         @returns: True if the package is visible, False otherwise.
1260         """
1261         if not pkg.metadata["SLOT"]:
1262                 return False
1263         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264                 if not pkgsettings._accept_chost(pkg):
1265                         return False
1266         eapi = pkg.metadata["EAPI"]
1267         if not portage.eapi_is_supported(eapi):
1268                 return False
1269         if not pkg.installed:
1270                 if portage._eapi_is_deprecated(eapi):
1271                         return False
1272                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1273                         return False
1274         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1275                 return False
1276         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1277                 return False
1278         try:
1279                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1280                         return False
1281         except portage.exception.InvalidDependString:
1282                 return False
1283         return True
1284
1285 def get_masking_status(pkg, pkgsettings, root_config):
1286
1287         mreasons = portage.getmaskingstatus(
1288                 pkg, settings=pkgsettings,
1289                 portdb=root_config.trees["porttree"].dbapi)
1290
1291         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292                 if not pkgsettings._accept_chost(pkg):
1293                         mreasons.append("CHOST: %s" % \
1294                                 pkg.metadata["CHOST"])
1295
1296         if not pkg.metadata["SLOT"]:
1297                 mreasons.append("invalid: SLOT is undefined")
1298
1299         return mreasons
1300
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302         db, pkg_type, built, installed, db_keys):
1303         eapi_masked = False
1304         try:
1305                 metadata = dict(izip(db_keys,
1306                         db.aux_get(cpv, db_keys)))
1307         except KeyError:
1308                 metadata = None
1309         if metadata and not built:
1310                 pkgsettings.setcpv(cpv, mydb=metadata)
1311                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312         if metadata is None:
1313                 mreasons = ["corruption"]
1314         else:
1315                 pkg = Package(type_name=pkg_type, root_config=root_config,
1316                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1317                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318         return metadata, mreasons
1319
1320 def show_masked_packages(masked_packages):
1321         shown_licenses = set()
1322         shown_comments = set()
1323         # Maybe there is both an ebuild and a binary. Only
1324         # show one of them to avoid redundant appearance.
1325         shown_cpvs = set()
1326         have_eapi_mask = False
1327         for (root_config, pkgsettings, cpv,
1328                 metadata, mreasons) in masked_packages:
1329                 if cpv in shown_cpvs:
1330                         continue
1331                 shown_cpvs.add(cpv)
1332                 comment, filename = None, None
1333                 if "package.mask" in mreasons:
1334                         comment, filename = \
1335                                 portage.getmaskingreason(
1336                                 cpv, metadata=metadata,
1337                                 settings=pkgsettings,
1338                                 portdb=root_config.trees["porttree"].dbapi,
1339                                 return_location=True)
1340                 missing_licenses = []
1341                 if metadata:
1342                         if not portage.eapi_is_supported(metadata["EAPI"]):
1343                                 have_eapi_mask = True
1344                         try:
1345                                 missing_licenses = \
1346                                         pkgsettings._getMissingLicenses(
1347                                                 cpv, metadata)
1348                         except portage.exception.InvalidDependString:
1349                                 # This will have already been reported
1350                                 # above via mreasons.
1351                                 pass
1352
1353                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354                 if comment and comment not in shown_comments:
1355                         print filename+":"
1356                         print comment
1357                         shown_comments.add(comment)
1358                 portdb = root_config.trees["porttree"].dbapi
1359                 for l in missing_licenses:
1360                         l_path = portdb.findLicensePath(l)
1361                         if l in shown_licenses:
1362                                 continue
1363                         msg = ("A copy of the '%s' license" + \
1364                         " is located at '%s'.") % (l, l_path)
1365                         print msg
1366                         print
1367                         shown_licenses.add(l)
1368         return have_eapi_mask
1369
1370 class Task(SlotObject):
1371         __slots__ = ("_hash_key", "_hash_value")
1372
1373         def _get_hash_key(self):
1374                 hash_key = getattr(self, "_hash_key", None)
1375                 if hash_key is None:
1376                         raise NotImplementedError(self)
1377                 return hash_key
1378
1379         def __eq__(self, other):
1380                 return self._get_hash_key() == other
1381
1382         def __ne__(self, other):
1383                 return self._get_hash_key() != other
1384
1385         def __hash__(self):
1386                 hash_value = getattr(self, "_hash_value", None)
1387                 if hash_value is None:
1388                         self._hash_value = hash(self._get_hash_key())
1389                 return self._hash_value
1390
1391         def __len__(self):
1392                 return len(self._get_hash_key())
1393
1394         def __getitem__(self, key):
1395                 return self._get_hash_key()[key]
1396
1397         def __iter__(self):
1398                 return iter(self._get_hash_key())
1399
1400         def __contains__(self, key):
1401                 return key in self._get_hash_key()
1402
1403         def __str__(self):
1404                 return str(self._get_hash_key())
1405
1406 class Blocker(Task):
1407
1408         __hash__ = Task.__hash__
1409         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1410
1411         def __init__(self, **kwargs):
1412                 Task.__init__(self, **kwargs)
1413                 self.cp = portage.dep_getkey(self.atom)
1414
1415         def _get_hash_key(self):
1416                 hash_key = getattr(self, "_hash_key", None)
1417                 if hash_key is None:
1418                         self._hash_key = \
1419                                 ("blocks", self.root, self.atom, self.eapi)
1420                 return self._hash_key
1421
1422 class Package(Task):
1423
1424         __hash__ = Task.__hash__
1425         __slots__ = ("built", "cpv", "depth",
1426                 "installed", "metadata", "onlydeps", "operation",
1427                 "root_config", "type_name",
1428                 "category", "counter", "cp", "cpv_split",
1429                 "inherited", "iuse", "mtime",
1430                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1431
1432         metadata_keys = [
1433                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434                 "INHERITED", "IUSE", "KEYWORDS",
1435                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1437
1438         def __init__(self, **kwargs):
1439                 Task.__init__(self, **kwargs)
1440                 self.root = self.root_config.root
1441                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442                 self.cp = portage.cpv_getkey(self.cpv)
1443                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444                 self.category, self.pf = portage.catsplit(self.cpv)
1445                 self.cpv_split = portage.catpkgsplit(self.cpv)
1446                 self.pv_split = self.cpv_split[1:]
1447
1448         class _use(object):
1449
1450                 __slots__ = ("__weakref__", "enabled")
1451
1452                 def __init__(self, use):
1453                         self.enabled = frozenset(use)
1454
1455         class _iuse(object):
1456
1457                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1458
1459                 def __init__(self, tokens, iuse_implicit):
1460                         self.tokens = tuple(tokens)
1461                         self.iuse_implicit = iuse_implicit
1462                         enabled = []
1463                         disabled = []
1464                         other = []
1465                         for x in tokens:
1466                                 prefix = x[:1]
1467                                 if prefix == "+":
1468                                         enabled.append(x[1:])
1469                                 elif prefix == "-":
1470                                         disabled.append(x[1:])
1471                                 else:
1472                                         other.append(x)
1473                         self.enabled = frozenset(enabled)
1474                         self.disabled = frozenset(disabled)
1475                         self.all = frozenset(chain(enabled, disabled, other))
1476
1477                 def __getattribute__(self, name):
1478                         if name == "regex":
1479                                 try:
1480                                         return object.__getattribute__(self, "regex")
1481                                 except AttributeError:
1482                                         all = object.__getattribute__(self, "all")
1483                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484                                         # Escape anything except ".*" which is supposed
1485                                         # to pass through from _get_implicit_iuse()
1486                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487                                         regex = "^(%s)$" % "|".join(regex)
1488                                         regex = regex.replace("\\.\\*", ".*")
1489                                         self.regex = re.compile(regex)
1490                         return object.__getattribute__(self, name)
1491
1492         def _get_hash_key(self):
1493                 hash_key = getattr(self, "_hash_key", None)
1494                 if hash_key is None:
1495                         if self.operation is None:
1496                                 self.operation = "merge"
1497                                 if self.onlydeps or self.installed:
1498                                         self.operation = "nomerge"
1499                         self._hash_key = \
1500                                 (self.type_name, self.root, self.cpv, self.operation)
1501                 return self._hash_key
1502
1503         def __lt__(self, other):
1504                 if other.cp != self.cp:
1505                         return False
1506                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1507                         return True
1508                 return False
1509
1510         def __le__(self, other):
1511                 if other.cp != self.cp:
1512                         return False
1513                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1514                         return True
1515                 return False
1516
1517         def __gt__(self, other):
1518                 if other.cp != self.cp:
1519                         return False
1520                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1521                         return True
1522                 return False
1523
1524         def __ge__(self, other):
1525                 if other.cp != self.cp:
1526                         return False
1527                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1528                         return True
1529                 return False
1530
1531 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1532         if not x.startswith("UNUSED_"))
1533 _all_metadata_keys.discard("CDEPEND")
1534 _all_metadata_keys.update(Package.metadata_keys)
1535
1536 from portage.cache.mappings import slot_dict_class
1537 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1538
1539 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1540         """
1541         Detect metadata updates and synchronize Package attributes.
1542         """
1543
1544         __slots__ = ("_pkg",)
1545         _wrapped_keys = frozenset(
1546                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1547
1548         def __init__(self, pkg, metadata):
1549                 _PackageMetadataWrapperBase.__init__(self)
1550                 self._pkg = pkg
1551                 self.update(metadata)
1552
1553         def __setitem__(self, k, v):
1554                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1555                 if k in self._wrapped_keys:
1556                         getattr(self, "_set_" + k.lower())(k, v)
1557
1558         def _set_inherited(self, k, v):
1559                 if isinstance(v, basestring):
1560                         v = frozenset(v.split())
1561                 self._pkg.inherited = v
1562
1563         def _set_iuse(self, k, v):
1564                 self._pkg.iuse = self._pkg._iuse(
1565                         v.split(), self._pkg.root_config.iuse_implicit)
1566
1567         def _set_slot(self, k, v):
1568                 self._pkg.slot = v
1569
1570         def _set_use(self, k, v):
1571                 self._pkg.use = self._pkg._use(v.split())
1572
1573         def _set_counter(self, k, v):
1574                 if isinstance(v, basestring):
1575                         try:
1576                                 v = long(v.strip())
1577                         except ValueError:
1578                                 v = 0
1579                 self._pkg.counter = v
1580
1581         def _set__mtime_(self, k, v):
1582                 if isinstance(v, basestring):
1583                         try:
1584                                 v = long(v.strip())
1585                         except ValueError:
1586                                 v = 0
1587                 self._pkg.mtime = v
1588
1589 class EbuildFetchonly(SlotObject):
1590
1591         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1592
1593         def execute(self):
1594                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1595                 # ensuring sane $PWD (bug #239560) and storing elog
1596                 # messages. Use a private temp directory, in order
1597                 # to avoid locking the main one.
1598                 settings = self.settings
1599                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600                 from tempfile import mkdtemp
1601                 try:
1602                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1603                 except OSError, e:
1604                         if e.errno != portage.exception.PermissionDenied.errno:
1605                                 raise
1606                         raise portage.exception.PermissionDenied(global_tmpdir)
1607                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608                 settings.backup_changes("PORTAGE_TMPDIR")
1609                 try:
1610                         retval = self._execute()
1611                 finally:
1612                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1613                         settings.backup_changes("PORTAGE_TMPDIR")
1614                         shutil.rmtree(private_tmpdir)
1615                 return retval
1616
1617         def _execute(self):
1618                 settings = self.settings
1619                 pkg = self.pkg
1620                 root_config = pkg.root_config
1621                 portdb = root_config.trees["porttree"].dbapi
1622                 ebuild_path = portdb.findname(pkg.cpv)
1623                 settings.setcpv(pkg)
1624                 debug = settings.get("PORTAGE_DEBUG") == "1"
1625                 use_cache = 1 # always true
1626                 portage.doebuild_environment(ebuild_path, "fetch",
1627                         root_config.root, settings, debug, use_cache, portdb)
1628                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1629
1630                 retval = portage.doebuild(ebuild_path, "fetch",
1631                         self.settings["ROOT"], self.settings, debug=debug,
1632                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633                         mydbapi=portdb, tree="porttree")
1634
1635                 if retval != os.EX_OK:
1636                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637                         eerror(msg, phase="unpack", key=pkg.cpv)
1638
1639                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1640                 return retval
1641
1642 class PollConstants(object):
1643
1644         """
1645         Provides POLL* constants that are equivalent to those from the
1646         select module, for use by PollSelectAdapter.
1647         """
1648
1649         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1650         v = 1
1651         for k in names:
1652                 locals()[k] = getattr(select, k, v)
1653                 v *= 2
1654         del k, v
1655
1656 class AsynchronousTask(SlotObject):
1657         """
1658         Subclasses override _wait() and _poll() so that calls
1659         to public methods can be wrapped for implementing
1660         hooks such as exit listener notification.
1661
1662         Sublasses should call self.wait() to notify exit listeners after
1663         the task is complete and self.returncode has been set.
1664         """
1665
1666         __slots__ = ("background", "cancelled", "returncode") + \
1667                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1668
1669         def start(self):
1670                 """
1671                 Start an asynchronous task and then return as soon as possible.
1672                 """
1673                 self._start()
1674                 self._start_hook()
1675
1676         def _start(self):
1677                 raise NotImplementedError(self)
1678
1679         def isAlive(self):
1680                 return self.returncode is None
1681
1682         def poll(self):
1683                 self._wait_hook()
1684                 return self._poll()
1685
1686         def _poll(self):
1687                 return self.returncode
1688
1689         def wait(self):
1690                 if self.returncode is None:
1691                         self._wait()
1692                 self._wait_hook()
1693                 return self.returncode
1694
1695         def _wait(self):
1696                 return self.returncode
1697
1698         def cancel(self):
1699                 self.cancelled = True
1700                 self.wait()
1701
1702         def addStartListener(self, f):
1703                 """
1704                 The function will be called with one argument, a reference to self.
1705                 """
1706                 if self._start_listeners is None:
1707                         self._start_listeners = []
1708                 self._start_listeners.append(f)
1709
1710         def removeStartListener(self, f):
1711                 if self._start_listeners is None:
1712                         return
1713                 self._start_listeners.remove(f)
1714
1715         def _start_hook(self):
1716                 if self._start_listeners is not None:
1717                         start_listeners = self._start_listeners
1718                         self._start_listeners = None
1719
1720                         for f in start_listeners:
1721                                 f(self)
1722
1723         def addExitListener(self, f):
1724                 """
1725                 The function will be called with one argument, a reference to self.
1726                 """
1727                 if self._exit_listeners is None:
1728                         self._exit_listeners = []
1729                 self._exit_listeners.append(f)
1730
1731         def removeExitListener(self, f):
1732                 if self._exit_listeners is None:
1733                         if self._exit_listener_stack is not None:
1734                                 self._exit_listener_stack.remove(f)
1735                         return
1736                 self._exit_listeners.remove(f)
1737
1738         def _wait_hook(self):
1739                 """
1740                 Call this method after the task completes, just before returning
1741                 the returncode from wait() or poll(). This hook is
1742                 used to trigger exit listeners when the returncode first
1743                 becomes available.
1744                 """
1745                 if self.returncode is not None and \
1746                         self._exit_listeners is not None:
1747
1748                         # This prevents recursion, in case one of the
1749                         # exit handlers triggers this method again by
1750                         # calling wait(). Use a stack that gives
1751                         # removeExitListener() an opportunity to consume
1752                         # listeners from the stack, before they can get
1753                         # called below. This is necessary because a call
1754                         # to one exit listener may result in a call to
1755                         # removeExitListener() for another listener on
1756                         # the stack. That listener needs to be removed
1757                         # from the stack since it would be inconsistent
1758                         # to call it after it has been been passed into
1759                         # removeExitListener().
1760                         self._exit_listener_stack = self._exit_listeners
1761                         self._exit_listeners = None
1762
1763                         self._exit_listener_stack.reverse()
1764                         while self._exit_listener_stack:
1765                                 self._exit_listener_stack.pop()(self)
1766
1767 class AbstractPollTask(AsynchronousTask):
1768
1769         __slots__ = ("scheduler",) + \
1770                 ("_registered",)
1771
1772         _bufsize = 4096
1773         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1774         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1775                 _exceptional_events
1776
1777         def _unregister(self):
1778                 raise NotImplementedError(self)
1779
1780         def _unregister_if_appropriate(self, event):
1781                 if self._registered:
1782                         if event & self._exceptional_events:
1783                                 self._unregister()
1784                                 self.cancel()
1785                         elif event & PollConstants.POLLHUP:
1786                                 self._unregister()
1787                                 self.wait()
1788
1789 class PipeReader(AbstractPollTask):
1790
1791         """
1792         Reads output from one or more files and saves it in memory,
1793         for retrieval via the getvalue() method. This is driven by
1794         the scheduler's poll() loop, so it runs entirely within the
1795         current process.
1796         """
1797
1798         __slots__ = ("input_files",) + \
1799                 ("_read_data", "_reg_ids")
1800
1801         def _start(self):
1802                 self._reg_ids = set()
1803                 self._read_data = []
1804                 for k, f in self.input_files.iteritems():
1805                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1806                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1807                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1808                                 self._registered_events, self._output_handler))
1809                 self._registered = True
1810
1811         def isAlive(self):
1812                 return self._registered
1813
1814         def cancel(self):
1815                 if self.returncode is None:
1816                         self.returncode = 1
1817                         self.cancelled = True
1818                 self.wait()
1819
1820         def _wait(self):
1821                 if self.returncode is not None:
1822                         return self.returncode
1823
1824                 if self._registered:
1825                         self.scheduler.schedule(self._reg_ids)
1826                         self._unregister()
1827
1828                 self.returncode = os.EX_OK
1829                 return self.returncode
1830
1831         def getvalue(self):
1832                 """Retrieve the entire contents"""
1833                 return "".join(self._read_data)
1834
1835         def close(self):
1836                 """Free the memory buffer."""
1837                 self._read_data = None
1838
1839         def _output_handler(self, fd, event):
1840
1841                 if event & PollConstants.POLLIN:
1842
1843                         for f in self.input_files.itervalues():
1844                                 if fd == f.fileno():
1845                                         break
1846
1847                         buf = array.array('B')
1848                         try:
1849                                 buf.fromfile(f, self._bufsize)
1850                         except EOFError:
1851                                 pass
1852
1853                         if buf:
1854                                 self._read_data.append(buf.tostring())
1855                         else:
1856                                 self._unregister()
1857                                 self.wait()
1858
1859                 self._unregister_if_appropriate(event)
1860                 return self._registered
1861
1862         def _unregister(self):
1863                 """
1864                 Unregister from the scheduler and close open files.
1865                 """
1866
1867                 self._registered = False
1868
1869                 if self._reg_ids is not None:
1870                         for reg_id in self._reg_ids:
1871                                 self.scheduler.unregister(reg_id)
1872                         self._reg_ids = None
1873
1874                 if self.input_files is not None:
1875                         for f in self.input_files.itervalues():
1876                                 f.close()
1877                         self.input_files = None
1878
1879 class CompositeTask(AsynchronousTask):
1880
1881         __slots__ = ("scheduler",) + ("_current_task",)
1882
1883         def isAlive(self):
1884                 return self._current_task is not None
1885
1886         def cancel(self):
1887                 self.cancelled = True
1888                 if self._current_task is not None:
1889                         self._current_task.cancel()
1890
1891         def _poll(self):
1892                 """
1893                 This does a loop calling self._current_task.poll()
1894                 repeatedly as long as the value of self._current_task
1895                 keeps changing. It calls poll() a maximum of one time
1896                 for a given self._current_task instance. This is useful
1897                 since calling poll() on a task can trigger advance to
1898                 the next task could eventually lead to the returncode
1899                 being set in cases when polling only a single task would
1900                 not have the same effect.
1901                 """
1902
1903                 prev = None
1904                 while True:
1905                         task = self._current_task
1906                         if task is None or task is prev:
1907                                 # don't poll the same task more than once
1908                                 break
1909                         task.poll()
1910                         prev = task
1911
1912                 return self.returncode
1913
1914         def _wait(self):
1915
1916                 prev = None
1917                 while True:
1918                         task = self._current_task
1919                         if task is None:
1920                                 # don't wait for the same task more than once
1921                                 break
1922                         if task is prev:
1923                                 # Before the task.wait() method returned, an exit
1924                                 # listener should have set self._current_task to either
1925                                 # a different task or None. Something is wrong.
1926                                 raise AssertionError("self._current_task has not " + \
1927                                         "changed since calling wait", self, task)
1928                         task.wait()
1929                         prev = task
1930
1931                 return self.returncode
1932
1933         def _assert_current(self, task):
1934                 """
1935                 Raises an AssertionError if the given task is not the
1936                 same one as self._current_task. This can be useful
1937                 for detecting bugs.
1938                 """
1939                 if task is not self._current_task:
1940                         raise AssertionError("Unrecognized task: %s" % (task,))
1941
1942         def _default_exit(self, task):
1943                 """
1944                 Calls _assert_current() on the given task and then sets the
1945                 composite returncode attribute if task.returncode != os.EX_OK.
1946                 If the task failed then self._current_task will be set to None.
1947                 Subclasses can use this as a generic task exit callback.
1948
1949                 @rtype: int
1950                 @returns: The task.returncode attribute.
1951                 """
1952                 self._assert_current(task)
1953                 if task.returncode != os.EX_OK:
1954                         self.returncode = task.returncode
1955                         self._current_task = None
1956                 return task.returncode
1957
1958         def _final_exit(self, task):
1959                 """
1960                 Assumes that task is the final task of this composite task.
1961                 Calls _default_exit() and sets self.returncode to the task's
1962                 returncode and sets self._current_task to None.
1963                 """
1964                 self._default_exit(task)
1965                 self._current_task = None
1966                 self.returncode = task.returncode
1967                 return self.returncode
1968
1969         def _default_final_exit(self, task):
1970                 """
1971                 This calls _final_exit() and then wait().
1972
1973                 Subclasses can use this as a generic final task exit callback.
1974
1975                 """
1976                 self._final_exit(task)
1977                 return self.wait()
1978
1979         def _start_task(self, task, exit_handler):
1980                 """
1981                 Register exit handler for the given task, set it
1982                 as self._current_task, and call task.start().
1983
1984                 Subclasses can use this as a generic way to start
1985                 a task.
1986
1987                 """
1988                 task.addExitListener(exit_handler)
1989                 self._current_task = task
1990                 task.start()
1991
1992 class TaskSequence(CompositeTask):
1993         """
1994         A collection of tasks that executes sequentially. Each task
1995         must have a addExitListener() method that can be used as
1996         a means to trigger movement from one task to the next.
1997         """
1998
1999         __slots__ = ("_task_queue",)
2000
2001         def __init__(self, **kwargs):
2002                 AsynchronousTask.__init__(self, **kwargs)
2003                 self._task_queue = deque()
2004
2005         def add(self, task):
2006                 self._task_queue.append(task)
2007
2008         def _start(self):
2009                 self._start_next_task()
2010
2011         def cancel(self):
2012                 self._task_queue.clear()
2013                 CompositeTask.cancel(self)
2014
2015         def _start_next_task(self):
2016                 self._start_task(self._task_queue.popleft(),
2017                         self._task_exit_handler)
2018
2019         def _task_exit_handler(self, task):
2020                 if self._default_exit(task) != os.EX_OK:
2021                         self.wait()
2022                 elif self._task_queue:
2023                         self._start_next_task()
2024                 else:
2025                         self._final_exit(task)
2026                         self.wait()
2027
2028 class SubProcess(AbstractPollTask):
2029
2030         __slots__ = ("pid",) + \
2031                 ("_files", "_reg_id")
2032
2033         # A file descriptor is required for the scheduler to monitor changes from
2034         # inside a poll() loop. When logging is not enabled, create a pipe just to
2035         # serve this purpose alone.
2036         _dummy_pipe_fd = 9
2037
2038         def _poll(self):
2039                 if self.returncode is not None:
2040                         return self.returncode
2041                 if self.pid is None:
2042                         return self.returncode
2043                 if self._registered:
2044                         return self.returncode
2045
2046                 try:
2047                         retval = os.waitpid(self.pid, os.WNOHANG)
2048                 except OSError, e:
2049                         if e.errno != errno.ECHILD:
2050                                 raise
2051                         del e
2052                         retval = (self.pid, 1)
2053
2054                 if retval == (0, 0):
2055                         return None
2056                 self._set_returncode(retval)
2057                 return self.returncode
2058
2059         def cancel(self):
2060                 if self.isAlive():
2061                         try:
2062                                 os.kill(self.pid, signal.SIGTERM)
2063                         except OSError, e:
2064                                 if e.errno != errno.ESRCH:
2065                                         raise
2066                                 del e
2067
2068                 self.cancelled = True
2069                 if self.pid is not None:
2070                         self.wait()
2071                 return self.returncode
2072
2073         def isAlive(self):
2074                 return self.pid is not None and \
2075                         self.returncode is None
2076
2077         def _wait(self):
2078
2079                 if self.returncode is not None:
2080                         return self.returncode
2081
2082                 if self._registered:
2083                         self.scheduler.schedule(self._reg_id)
2084                         self._unregister()
2085                         if self.returncode is not None:
2086                                 return self.returncode
2087
2088                 try:
2089                         wait_retval = os.waitpid(self.pid, 0)
2090                 except OSError, e:
2091                         if e.errno != errno.ECHILD:
2092                                 raise
2093                         del e
2094                         self._set_returncode((self.pid, 1))
2095                 else:
2096                         self._set_returncode(wait_retval)
2097
2098                 return self.returncode
2099
2100         def _unregister(self):
2101                 """
2102                 Unregister from the scheduler and close open files.
2103                 """
2104
2105                 self._registered = False
2106
2107                 if self._reg_id is not None:
2108                         self.scheduler.unregister(self._reg_id)
2109                         self._reg_id = None
2110
2111                 if self._files is not None:
2112                         for f in self._files.itervalues():
2113                                 f.close()
2114                         self._files = None
2115
2116         def _set_returncode(self, wait_retval):
2117
2118                 retval = wait_retval[1]
2119
2120                 if retval != os.EX_OK:
2121                         if retval & 0xff:
2122                                 retval = (retval & 0xff) << 8
2123                         else:
2124                                 retval = retval >> 8
2125
2126                 self.returncode = retval
2127
2128 class SpawnProcess(SubProcess):
2129
2130         """
2131         Constructor keyword args are passed into portage.process.spawn().
2132         The required "args" keyword argument will be passed as the first
2133         spawn() argument.
2134         """
2135
2136         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2137                 "uid", "gid", "groups", "umask", "logfile",
2138                 "path_lookup", "pre_exec")
2139
2140         __slots__ = ("args",) + \
2141                 _spawn_kwarg_names
2142
2143         _file_names = ("log", "process", "stdout")
2144         _files_dict = slot_dict_class(_file_names, prefix="")
2145
2146         def _start(self):
2147
2148                 if self.cancelled:
2149                         return
2150
2151                 if self.fd_pipes is None:
2152                         self.fd_pipes = {}
2153                 fd_pipes = self.fd_pipes
2154                 fd_pipes.setdefault(0, sys.stdin.fileno())
2155                 fd_pipes.setdefault(1, sys.stdout.fileno())
2156                 fd_pipes.setdefault(2, sys.stderr.fileno())
2157
2158                 # flush any pending output
2159                 for fd in fd_pipes.itervalues():
2160                         if fd == sys.stdout.fileno():
2161                                 sys.stdout.flush()
2162                         if fd == sys.stderr.fileno():
2163                                 sys.stderr.flush()
2164
2165                 logfile = self.logfile
2166                 self._files = self._files_dict()
2167                 files = self._files
2168
2169                 master_fd, slave_fd = self._pipe(fd_pipes)
2170                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2171                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2172
2173                 null_input = None
2174                 fd_pipes_orig = fd_pipes.copy()
2175                 if self.background:
2176                         # TODO: Use job control functions like tcsetpgrp() to control
2177                         # access to stdin. Until then, use /dev/null so that any
2178                         # attempts to read from stdin will immediately return EOF
2179                         # instead of blocking indefinitely.
2180                         null_input = open('/dev/null', 'rb')
2181                         fd_pipes[0] = null_input.fileno()
2182                 else:
2183                         fd_pipes[0] = fd_pipes_orig[0]
2184
2185                 files.process = os.fdopen(master_fd, 'r')
2186                 if logfile is not None:
2187
2188                         fd_pipes[1] = slave_fd
2189                         fd_pipes[2] = slave_fd
2190
2191                         files.log = open(logfile, "a")
2192                         portage.util.apply_secpass_permissions(logfile,
2193                                 uid=portage.portage_uid, gid=portage.portage_gid,
2194                                 mode=0660)
2195
2196                         if not self.background:
2197                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2198
2199                         output_handler = self._output_handler
2200
2201                 else:
2202
2203                         # Create a dummy pipe so the scheduler can monitor
2204                         # the process from inside a poll() loop.
2205                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2206                         if self.background:
2207                                 fd_pipes[1] = slave_fd
2208                                 fd_pipes[2] = slave_fd
2209                         output_handler = self._dummy_handler
2210
2211                 kwargs = {}
2212                 for k in self._spawn_kwarg_names:
2213                         v = getattr(self, k)
2214                         if v is not None:
2215                                 kwargs[k] = v
2216
2217                 kwargs["fd_pipes"] = fd_pipes
2218                 kwargs["returnpid"] = True
2219                 kwargs.pop("logfile", None)
2220
2221                 self._reg_id = self.scheduler.register(files.process.fileno(),
2222                         self._registered_events, output_handler)
2223                 self._registered = True
2224
2225                 retval = self._spawn(self.args, **kwargs)
2226
2227                 os.close(slave_fd)
2228                 if null_input is not None:
2229                         null_input.close()
2230
2231                 if isinstance(retval, int):
2232                         # spawn failed
2233                         self._unregister()
2234                         self.returncode = retval
2235                         self.wait()
2236                         return
2237
2238                 self.pid = retval[0]
2239                 portage.process.spawned_pids.remove(self.pid)
2240
2241         def _pipe(self, fd_pipes):
2242                 """
2243                 @type fd_pipes: dict
2244                 @param fd_pipes: pipes from which to copy terminal size if desired.
2245                 """
2246                 return os.pipe()
2247
2248         def _spawn(self, args, **kwargs):
2249                 return portage.process.spawn(args, **kwargs)
2250
2251         def _output_handler(self, fd, event):
2252
2253                 if event & PollConstants.POLLIN:
2254
2255                         files = self._files
2256                         buf = array.array('B')
2257                         try:
2258                                 buf.fromfile(files.process, self._bufsize)
2259                         except EOFError:
2260                                 pass
2261
2262                         if buf:
2263                                 if not self.background:
2264                                         buf.tofile(files.stdout)
2265                                         files.stdout.flush()
2266                                 buf.tofile(files.log)
2267                                 files.log.flush()
2268                         else:
2269                                 self._unregister()
2270                                 self.wait()
2271
2272                 self._unregister_if_appropriate(event)
2273                 return self._registered
2274
2275         def _dummy_handler(self, fd, event):
2276                 """
2277                 This method is mainly interested in detecting EOF, since
2278                 the only purpose of the pipe is to allow the scheduler to
2279                 monitor the process from inside a poll() loop.
2280                 """
2281
2282                 if event & PollConstants.POLLIN:
2283
2284                         buf = array.array('B')
2285                         try:
2286                                 buf.fromfile(self._files.process, self._bufsize)
2287                         except EOFError:
2288                                 pass
2289
2290                         if buf:
2291                                 pass
2292                         else:
2293                                 self._unregister()
2294                                 self.wait()
2295
2296                 self._unregister_if_appropriate(event)
2297                 return self._registered
2298
2299 class MiscFunctionsProcess(SpawnProcess):
2300         """
2301         Spawns misc-functions.sh with an existing ebuild environment.
2302         """
2303
2304         __slots__ = ("commands", "phase", "pkg", "settings")
2305
2306         def _start(self):
2307                 settings = self.settings
2308                 settings.pop("EBUILD_PHASE", None)
2309                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2310                 misc_sh_binary = os.path.join(portage_bin_path,
2311                         os.path.basename(portage.const.MISC_SH_BINARY))
2312
2313                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2314                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2315
2316                 portage._doebuild_exit_status_unlink(
2317                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2318
2319                 SpawnProcess._start(self)
2320
2321         def _spawn(self, args, **kwargs):
2322                 settings = self.settings
2323                 debug = settings.get("PORTAGE_DEBUG") == "1"
2324                 return portage.spawn(" ".join(args), settings,
2325                         debug=debug, **kwargs)
2326
2327         def _set_returncode(self, wait_retval):
2328                 SpawnProcess._set_returncode(self, wait_retval)
2329                 self.returncode = portage._doebuild_exit_status_check_and_log(
2330                         self.settings, self.phase, self.returncode)
2331
2332 class EbuildFetcher(SpawnProcess):
2333
2334         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2335                 ("_build_dir",)
2336
2337         def _start(self):
2338
2339                 root_config = self.pkg.root_config
2340                 portdb = root_config.trees["porttree"].dbapi
2341                 ebuild_path = portdb.findname(self.pkg.cpv)
2342                 settings = self.config_pool.allocate()
2343                 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2344                 self._build_dir.lock()
2345                 self._build_dir.clean()
2346                 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2347                 if self.logfile is None:
2348                         self.logfile = settings.get("PORTAGE_LOG_FILE")
2349
2350                 phase = "fetch"
2351                 if self.fetchall:
2352                         phase = "fetchall"
2353
2354                 # If any incremental variables have been overridden
2355                 # via the environment, those values need to be passed
2356                 # along here so that they are correctly considered by
2357                 # the config instance in the subproccess.
2358                 fetch_env = os.environ.copy()
2359
2360                 fetch_env["PORTAGE_NICENESS"] = "0"
2361                 if self.prefetch:
2362                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2363
2364                 ebuild_binary = os.path.join(
2365                         settings["PORTAGE_BIN_PATH"], "ebuild")
2366
2367                 fetch_args = [ebuild_binary, ebuild_path, phase]
2368                 debug = settings.get("PORTAGE_DEBUG") == "1"
2369                 if debug:
2370                         fetch_args.append("--debug")
2371
2372                 self.args = fetch_args
2373                 self.env = fetch_env
2374                 SpawnProcess._start(self)
2375
2376         def _pipe(self, fd_pipes):
2377                 """When appropriate, use a pty so that fetcher progress bars,
2378                 like wget has, will work properly."""
2379                 if self.background or not sys.stdout.isatty():
2380                         # When the output only goes to a log file,
2381                         # there's no point in creating a pty.
2382                         return os.pipe()
2383                 stdout_pipe = fd_pipes.get(1)
2384                 got_pty, master_fd, slave_fd = \
2385                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2386                 return (master_fd, slave_fd)
2387
2388         def _set_returncode(self, wait_retval):
2389                 SpawnProcess._set_returncode(self, wait_retval)
2390                 # Collect elog messages that might have been
2391                 # created by the pkg_nofetch phase.
2392                 if self._build_dir is not None:
2393                         # Skip elog messages for prefetch, in order to avoid duplicates.
2394                         if not self.prefetch and self.returncode != os.EX_OK:
2395                                 elog_out = None
2396                                 if self.logfile is not None:
2397                                         if self.background:
2398                                                 elog_out = open(self.logfile, 'a')
2399                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2400                                 if self.logfile is not None:
2401                                         msg += ", Log file:"
2402                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2403                                 if self.logfile is not None:
2404                                         eerror(" '%s'" % (self.logfile,),
2405                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2406                                 if elog_out is not None:
2407                                         elog_out.close()
2408                         if not self.prefetch:
2409                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2410                         features = self._build_dir.settings.features
2411                         if self.returncode == os.EX_OK:
2412                                 self._build_dir.clean()
2413                         self._build_dir.unlock()
2414                         self.config_pool.deallocate(self._build_dir.settings)
2415                         self._build_dir = None
2416
2417 class EbuildBuildDir(SlotObject):
2418
2419         __slots__ = ("dir_path", "pkg", "settings",
2420                 "locked", "_catdir", "_lock_obj")
2421
2422         def __init__(self, **kwargs):
2423                 SlotObject.__init__(self, **kwargs)
2424                 self.locked = False
2425
2426         def lock(self):
2427                 """
2428                 This raises an AlreadyLocked exception if lock() is called
2429                 while a lock is already held. In order to avoid this, call
2430                 unlock() or check whether the "locked" attribute is True
2431                 or False before calling lock().
2432                 """
2433                 if self._lock_obj is not None:
2434                         raise self.AlreadyLocked((self._lock_obj,))
2435
2436                 dir_path = self.dir_path
2437                 if dir_path is None:
2438                         root_config = self.pkg.root_config
2439                         portdb = root_config.trees["porttree"].dbapi
2440                         ebuild_path = portdb.findname(self.pkg.cpv)
2441                         settings = self.settings
2442                         settings.setcpv(self.pkg)
2443                         debug = settings.get("PORTAGE_DEBUG") == "1"
2444                         use_cache = 1 # always true
2445                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2446                                 self.settings, debug, use_cache, portdb)
2447                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2448
2449                 catdir = os.path.dirname(dir_path)
2450                 self._catdir = catdir
2451
2452                 portage.util.ensure_dirs(os.path.dirname(catdir),
2453                         gid=portage.portage_gid,
2454                         mode=070, mask=0)
2455                 catdir_lock = None
2456                 try:
2457                         catdir_lock = portage.locks.lockdir(catdir)
2458                         portage.util.ensure_dirs(catdir,
2459                                 gid=portage.portage_gid,
2460                                 mode=070, mask=0)
2461                         self._lock_obj = portage.locks.lockdir(dir_path)
2462                 finally:
2463                         self.locked = self._lock_obj is not None
2464                         if catdir_lock is not None:
2465                                 portage.locks.unlockdir(catdir_lock)
2466
2467         def clean(self):
2468                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2469                 by keepwork or keeptemp in FEATURES."""
2470                 settings = self.settings
2471                 features = settings.features
2472                 if not ("keepwork" in features or "keeptemp" in features):
2473                         try:
2474                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2475                         except EnvironmentError, e:
2476                                 if e.errno != errno.ENOENT:
2477                                         raise
2478                                 del e
2479
2480         def unlock(self):
2481                 if self._lock_obj is None:
2482                         return
2483
2484                 portage.locks.unlockdir(self._lock_obj)
2485                 self._lock_obj = None
2486                 self.locked = False
2487
2488                 catdir = self._catdir
2489                 catdir_lock = None
2490                 try:
2491                         catdir_lock = portage.locks.lockdir(catdir)
2492                 finally:
2493                         if catdir_lock:
2494                                 try:
2495                                         os.rmdir(catdir)
2496                                 except OSError, e:
2497                                         if e.errno not in (errno.ENOENT,
2498                                                 errno.ENOTEMPTY, errno.EEXIST):
2499                                                 raise
2500                                         del e
2501                                 portage.locks.unlockdir(catdir_lock)
2502
2503         class AlreadyLocked(portage.exception.PortageException):
2504                 pass
2505
2506 class EbuildBuild(CompositeTask):
2507
2508         __slots__ = ("args_set", "config_pool", "find_blockers",
2509                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2510                 "prefetcher", "settings", "world_atom") + \
2511                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2512
2513         def _start(self):
2514
2515                 logger = self.logger
2516                 opts = self.opts
2517                 pkg = self.pkg
2518                 settings = self.settings
2519                 world_atom = self.world_atom
2520                 root_config = pkg.root_config
2521                 tree = "porttree"
2522                 self._tree = tree
2523                 portdb = root_config.trees[tree].dbapi
2524                 settings.setcpv(pkg)
2525                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2526                 ebuild_path = portdb.findname(self.pkg.cpv)
2527                 self._ebuild_path = ebuild_path
2528
2529                 prefetcher = self.prefetcher
2530                 if prefetcher is None:
2531                         pass
2532                 elif not prefetcher.isAlive():
2533                         prefetcher.cancel()
2534                 elif prefetcher.poll() is None:
2535
2536                         waiting_msg = "Fetching files " + \
2537                                 "in the background. " + \
2538                                 "To view fetch progress, run `tail -f " + \
2539                                 "/var/log/emerge-fetch.log` in another " + \
2540                                 "terminal."
2541                         msg_prefix = colorize("GOOD", " * ")
2542                         from textwrap import wrap
2543                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2544                                 for line in wrap(waiting_msg, 65))
2545                         if not self.background:
2546                                 writemsg(waiting_msg, noiselevel=-1)
2547
2548                         self._current_task = prefetcher
2549                         prefetcher.addExitListener(self._prefetch_exit)
2550                         return
2551
2552                 self._prefetch_exit(prefetcher)
2553
2554         def _prefetch_exit(self, prefetcher):
2555
2556                 opts = self.opts
2557                 pkg = self.pkg
2558                 settings = self.settings
2559
2560                 if opts.fetchonly:
2561                                 fetcher = EbuildFetchonly(
2562                                         fetch_all=opts.fetch_all_uri,
2563                                         pkg=pkg, pretend=opts.pretend,
2564                                         settings=settings)
2565                                 retval = fetcher.execute()
2566                                 self.returncode = retval
2567                                 self.wait()
2568                                 return
2569
2570                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2571                         fetchall=opts.fetch_all_uri,
2572                         fetchonly=opts.fetchonly,
2573                         background=self.background,
2574                         pkg=pkg, scheduler=self.scheduler)
2575
2576                 self._start_task(fetcher, self._fetch_exit)
2577
2578         def _fetch_exit(self, fetcher):
2579                 opts = self.opts
2580                 pkg = self.pkg
2581
2582                 fetch_failed = False
2583                 if opts.fetchonly:
2584                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2585                 else:
2586                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2587
2588                 if fetch_failed and fetcher.logfile is not None and \
2589                         os.path.exists(fetcher.logfile):
2590                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2591
2592                 if not fetch_failed and fetcher.logfile is not None:
2593                         # Fetch was successful, so remove the fetch log.
2594                         try:
2595                                 os.unlink(fetcher.logfile)
2596                         except OSError:
2597                                 pass
2598
2599                 if fetch_failed or opts.fetchonly:
2600                         self.wait()
2601                         return
2602
2603                 logger = self.logger
2604                 opts = self.opts
2605                 pkg_count = self.pkg_count
2606                 scheduler = self.scheduler
2607                 settings = self.settings
2608                 features = settings.features
2609                 ebuild_path = self._ebuild_path
2610                 system_set = pkg.root_config.sets["system"]
2611
2612                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2613                 self._build_dir.lock()
2614
2615                 # Cleaning is triggered before the setup
2616                 # phase, in portage.doebuild().
2617                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2618                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2619                 short_msg = "emerge: (%s of %s) %s Clean" % \
2620                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2621                 logger.log(msg, short_msg=short_msg)
2622
2623                 #buildsyspkg: Check if we need to _force_ binary package creation
2624                 self._issyspkg = "buildsyspkg" in features and \
2625                                 system_set.findAtomForPackage(pkg) and \
2626                                 not opts.buildpkg
2627
2628                 if opts.buildpkg or self._issyspkg:
2629
2630                         self._buildpkg = True
2631
2632                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2633                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2634                         short_msg = "emerge: (%s of %s) %s Compile" % \
2635                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2636                         logger.log(msg, short_msg=short_msg)
2637
2638                 else:
2639                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2640                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2641                         short_msg = "emerge: (%s of %s) %s Compile" % \
2642                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2643                         logger.log(msg, short_msg=short_msg)
2644
2645                 build = EbuildExecuter(background=self.background, pkg=pkg,
2646                         scheduler=scheduler, settings=settings)
2647                 self._start_task(build, self._build_exit)
2648
2649         def _unlock_builddir(self):
2650                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651                 self._build_dir.unlock()
2652
2653         def _build_exit(self, build):
2654                 if self._default_exit(build) != os.EX_OK:
2655                         self._unlock_builddir()
2656                         self.wait()
2657                         return
2658
2659                 opts = self.opts
2660                 buildpkg = self._buildpkg
2661
2662                 if not buildpkg:
2663                         self._final_exit(build)
2664                         self.wait()
2665                         return
2666
2667                 if self._issyspkg:
2668                         msg = ">>> This is a system package, " + \
2669                                 "let's pack a rescue tarball.\n"
2670
2671                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2672                         if log_path is not None:
2673                                 log_file = open(log_path, 'a')
2674                                 try:
2675                                         log_file.write(msg)
2676                                 finally:
2677                                         log_file.close()
2678
2679                         if not self.background:
2680                                 portage.writemsg_stdout(msg, noiselevel=-1)
2681
2682                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2683                         scheduler=self.scheduler, settings=self.settings)
2684
2685                 self._start_task(packager, self._buildpkg_exit)
2686
2687         def _buildpkg_exit(self, packager):
2688                 """
2689                 Released build dir lock when there is a failure or
2690                 when in buildpkgonly mode. Otherwise, the lock will
2691                 be released when merge() is called.
2692                 """
2693
2694                 if self._default_exit(packager) != os.EX_OK:
2695                         self._unlock_builddir()
2696                         self.wait()
2697                         return
2698
2699                 if self.opts.buildpkgonly:
2700                         # Need to call "clean" phase for buildpkgonly mode
2701                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2702                         phase = "clean"
2703                         clean_phase = EbuildPhase(background=self.background,
2704                                 pkg=self.pkg, phase=phase,
2705                                 scheduler=self.scheduler, settings=self.settings,
2706                                 tree=self._tree)
2707                         self._start_task(clean_phase, self._clean_exit)
2708                         return
2709
2710                 # Continue holding the builddir lock until
2711                 # after the package has been installed.
2712                 self._current_task = None
2713                 self.returncode = packager.returncode
2714                 self.wait()
2715
2716         def _clean_exit(self, clean_phase):
2717                 if self._final_exit(clean_phase) != os.EX_OK or \
2718                         self.opts.buildpkgonly:
2719                         self._unlock_builddir()
2720                 self.wait()
2721
2722         def install(self):
2723                 """
2724                 Install the package and then clean up and release locks.
2725                 Only call this after the build has completed successfully
2726                 and neither fetchonly nor buildpkgonly mode are enabled.
2727                 """
2728
2729                 find_blockers = self.find_blockers
2730                 ldpath_mtimes = self.ldpath_mtimes
2731                 logger = self.logger
2732                 pkg = self.pkg
2733                 pkg_count = self.pkg_count
2734                 settings = self.settings
2735                 world_atom = self.world_atom
2736                 ebuild_path = self._ebuild_path
2737                 tree = self._tree
2738
2739                 merge = EbuildMerge(find_blockers=self.find_blockers,
2740                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2741                         pkg_count=pkg_count, pkg_path=ebuild_path,
2742                         scheduler=self.scheduler,
2743                         settings=settings, tree=tree, world_atom=world_atom)
2744
2745                 msg = " === (%s of %s) Merging (%s::%s)" % \
2746                         (pkg_count.curval, pkg_count.maxval,
2747                         pkg.cpv, ebuild_path)
2748                 short_msg = "emerge: (%s of %s) %s Merge" % \
2749                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2750                 logger.log(msg, short_msg=short_msg)
2751
2752                 try:
2753                         rval = merge.execute()
2754                 finally:
2755                         self._unlock_builddir()
2756
2757                 return rval
2758
2759 class EbuildExecuter(CompositeTask):
2760
2761         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2762
2763         _phases = ("prepare", "configure", "compile", "test", "install")
2764
2765         _live_eclasses = frozenset([
2766                 "bzr",
2767                 "cvs",
2768                 "darcs",
2769                 "git",
2770                 "mercurial",
2771                 "subversion"
2772         ])
2773
2774         def _start(self):
2775                 self._tree = "porttree"
2776                 pkg = self.pkg
2777                 phase = "clean"
2778                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2779                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2780                 self._start_task(clean_phase, self._clean_phase_exit)
2781
2782         def _clean_phase_exit(self, clean_phase):
2783
2784                 if self._default_exit(clean_phase) != os.EX_OK:
2785                         self.wait()
2786                         return
2787
2788                 pkg = self.pkg
2789                 scheduler = self.scheduler
2790                 settings = self.settings
2791                 cleanup = 1
2792
2793                 # This initializes PORTAGE_LOG_FILE.
2794                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2795
2796                 setup_phase = EbuildPhase(background=self.background,
2797                         pkg=pkg, phase="setup", scheduler=scheduler,
2798                         settings=settings, tree=self._tree)
2799
2800                 setup_phase.addExitListener(self._setup_exit)
2801                 self._current_task = setup_phase
2802                 self.scheduler.scheduleSetup(setup_phase)
2803
2804         def _setup_exit(self, setup_phase):
2805
2806                 if self._default_exit(setup_phase) != os.EX_OK:
2807                         self.wait()
2808                         return
2809
2810                 unpack_phase = EbuildPhase(background=self.background,
2811                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2812                         settings=self.settings, tree=self._tree)
2813
2814                 if self._live_eclasses.intersection(self.pkg.inherited):
2815                         # Serialize $DISTDIR access for live ebuilds since
2816                         # otherwise they can interfere with eachother.
2817
2818                         unpack_phase.addExitListener(self._unpack_exit)
2819                         self._current_task = unpack_phase
2820                         self.scheduler.scheduleUnpack(unpack_phase)
2821
2822                 else:
2823                         self._start_task(unpack_phase, self._unpack_exit)
2824
2825         def _unpack_exit(self, unpack_phase):
2826
2827                 if self._default_exit(unpack_phase) != os.EX_OK:
2828                         self.wait()
2829                         return
2830
2831                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2832
2833                 pkg = self.pkg
2834                 phases = self._phases
2835                 eapi = pkg.metadata["EAPI"]
2836                 if eapi in ("0", "1", "2_pre1"):
2837                         # skip src_prepare and src_configure
2838                         phases = phases[2:]
2839                 elif eapi in ("2_pre2",):
2840                         # skip src_prepare
2841                         phases = phases[1:]
2842
2843                 for phase in phases:
2844                         ebuild_phases.add(EbuildPhase(background=self.background,
2845                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2846                                 settings=self.settings, tree=self._tree))
2847
2848                 self._start_task(ebuild_phases, self._default_final_exit)
2849
2850 class EbuildMetadataPhase(SubProcess):
2851
2852         """
2853         Asynchronous interface for the ebuild "depend" phase which is
2854         used to extract metadata from the ebuild.
2855         """
2856
2857         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2858                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2859                 ("_raw_metadata",)
2860
2861         _file_names = ("ebuild",)
2862         _files_dict = slot_dict_class(_file_names, prefix="")
2863         _metadata_fd = 9
2864
2865         def _start(self):
2866                 settings = self.settings
2867                 settings.reset()
2868                 ebuild_path = self.ebuild_path
2869                 debug = settings.get("PORTAGE_DEBUG") == "1"
2870                 master_fd = None
2871                 slave_fd = None
2872                 fd_pipes = None
2873                 if self.fd_pipes is not None:
2874                         fd_pipes = self.fd_pipes.copy()
2875                 else:
2876                         fd_pipes = {}
2877
2878                 fd_pipes.setdefault(0, sys.stdin.fileno())
2879                 fd_pipes.setdefault(1, sys.stdout.fileno())
2880                 fd_pipes.setdefault(2, sys.stderr.fileno())
2881
2882                 # flush any pending output
2883                 for fd in fd_pipes.itervalues():
2884                         if fd == sys.stdout.fileno():
2885                                 sys.stdout.flush()
2886                         if fd == sys.stderr.fileno():
2887                                 sys.stderr.flush()
2888
2889                 fd_pipes_orig = fd_pipes.copy()
2890                 self._files = self._files_dict()
2891                 files = self._files
2892
2893                 master_fd, slave_fd = os.pipe()
2894                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2895                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2896
2897                 fd_pipes[self._metadata_fd] = slave_fd
2898
2899                 self._raw_metadata = []
2900                 files.ebuild = os.fdopen(master_fd, 'r')
2901                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2902                         self._registered_events, self._output_handler)
2903                 self._registered = True
2904
2905                 retval = portage.doebuild(ebuild_path, "depend",
2906                         settings["ROOT"], settings, debug,
2907                         mydbapi=self.portdb, tree="porttree",
2908                         fd_pipes=fd_pipes, returnpid=True)
2909
2910                 os.close(slave_fd)
2911
2912                 if isinstance(retval, int):
2913                         # doebuild failed before spawning
2914                         self._unregister()
2915                         self.returncode = retval
2916                         self.wait()
2917                         return
2918
2919                 self.pid = retval[0]
2920                 portage.process.spawned_pids.remove(self.pid)
2921
2922         def _output_handler(self, fd, event):
2923
2924                 if event & PollConstants.POLLIN:
2925                         self._raw_metadata.append(self._files.ebuild.read())
2926                         if not self._raw_metadata[-1]:
2927                                 self._unregister()
2928                                 self.wait()
2929
2930                 self._unregister_if_appropriate(event)
2931                 return self._registered
2932
2933         def _set_returncode(self, wait_retval):
2934                 SubProcess._set_returncode(self, wait_retval)
2935                 if self.returncode == os.EX_OK:
2936                         metadata_lines = "".join(self._raw_metadata).splitlines()
2937                         if len(portage.auxdbkeys) != len(metadata_lines):
2938                                 # Don't trust bash's returncode if the
2939                                 # number of lines is incorrect.
2940                                 self.returncode = 1
2941                         else:
2942                                 metadata = izip(portage.auxdbkeys, metadata_lines)
2943                                 self.metadata_callback(self.cpv, self.ebuild_path,
2944                                         self.repo_path, metadata, self.ebuild_mtime)
2945
2946 class EbuildProcess(SpawnProcess):
2947
2948         __slots__ = ("phase", "pkg", "settings", "tree")
2949
2950         def _start(self):
2951                 # Don't open the log file during the clean phase since the
2952                 # open file can result in an nfs lock on $T/build.log which
2953                 # prevents the clean phase from removing $T.
2954                 if self.phase not in ("clean", "cleanrm"):
2955                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2956                 SpawnProcess._start(self)
2957
2958         def _pipe(self, fd_pipes):
2959                 stdout_pipe = fd_pipes.get(1)
2960                 got_pty, master_fd, slave_fd = \
2961                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962                 return (master_fd, slave_fd)
2963
2964         def _spawn(self, args, **kwargs):
2965
2966                 root_config = self.pkg.root_config
2967                 tree = self.tree
2968                 mydbapi = root_config.trees[tree].dbapi
2969                 settings = self.settings
2970                 ebuild_path = settings["EBUILD"]
2971                 debug = settings.get("PORTAGE_DEBUG") == "1"
2972
2973                 rval = portage.doebuild(ebuild_path, self.phase,
2974                         root_config.root, settings, debug,
2975                         mydbapi=mydbapi, tree=tree, **kwargs)
2976
2977                 return rval
2978
2979         def _set_returncode(self, wait_retval):
2980                 SpawnProcess._set_returncode(self, wait_retval)
2981
2982                 if self.phase not in ("clean", "cleanrm"):
2983                         self.returncode = portage._doebuild_exit_status_check_and_log(
2984                                 self.settings, self.phase, self.returncode)
2985
2986                 if self.phase == "test" and self.returncode != os.EX_OK and \
2987                         "test-fail-continue" in self.settings.features:
2988                         self.returncode = os.EX_OK
2989
2990                 portage._post_phase_userpriv_perms(self.settings)
2991
2992 class EbuildPhase(CompositeTask):
2993
2994         __slots__ = ("background", "pkg", "phase",
2995                 "scheduler", "settings", "tree")
2996
2997         _post_phase_cmds = portage._post_phase_cmds
2998
2999         def _start(self):
3000
3001                 ebuild_process = EbuildProcess(background=self.background,
3002                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3003                         settings=self.settings, tree=self.tree)
3004
3005                 self._start_task(ebuild_process, self._ebuild_exit)
3006
3007         def _ebuild_exit(self, ebuild_process):
3008
3009                 if self.phase == "install":
3010                         out = None
3011                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3012                         log_file = None
3013                         if self.background and log_path is not None:
3014                                 log_file = open(log_path, 'a')
3015                                 out = log_file
3016                         try:
3017                                 portage._check_build_log(self.settings, out=out)
3018                         finally:
3019                                 if log_file is not None:
3020                                         log_file.close()
3021
3022                 if self._default_exit(ebuild_process) != os.EX_OK:
3023                         self.wait()
3024                         return
3025
3026                 settings = self.settings
3027
3028                 if self.phase == "install":
3029                         portage._post_src_install_uid_fix(settings)
3030
3031                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3032                 if post_phase_cmds is not None:
3033                         post_phase = MiscFunctionsProcess(background=self.background,
3034                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3035                                 scheduler=self.scheduler, settings=settings)
3036                         self._start_task(post_phase, self._post_phase_exit)
3037                         return
3038
3039                 self.returncode = ebuild_process.returncode
3040                 self._current_task = None
3041                 self.wait()
3042
3043         def _post_phase_exit(self, post_phase):
3044                 if self._final_exit(post_phase) != os.EX_OK:
3045                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3046                                 noiselevel=-1)
3047                 self._current_task = None
3048                 self.wait()
3049                 return
3050
3051 class EbuildBinpkg(EbuildProcess):
3052         """
3053         This assumes that src_install() has successfully completed.
3054         """
3055         __slots__ = ("_binpkg_tmpfile",)
3056
3057         def _start(self):
3058                 self.phase = "package"
3059                 self.tree = "porttree"
3060                 pkg = self.pkg
3061                 root_config = pkg.root_config
3062                 portdb = root_config.trees["porttree"].dbapi
3063                 bintree = root_config.trees["bintree"]
3064                 ebuild_path = portdb.findname(self.pkg.cpv)
3065                 settings = self.settings
3066                 debug = settings.get("PORTAGE_DEBUG") == "1"
3067
3068                 bintree.prevent_collision(pkg.cpv)
3069                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3070                         pkg.cpv + ".tbz2." + str(os.getpid()))
3071                 self._binpkg_tmpfile = binpkg_tmpfile
3072                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3073                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3074
3075                 try:
3076                         EbuildProcess._start(self)
3077                 finally:
3078                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3079
3080         def _set_returncode(self, wait_retval):
3081                 EbuildProcess._set_returncode(self, wait_retval)
3082
3083                 pkg = self.pkg
3084                 bintree = pkg.root_config.trees["bintree"]
3085                 binpkg_tmpfile = self._binpkg_tmpfile
3086                 if self.returncode == os.EX_OK:
3087                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3088
3089 class EbuildMerge(SlotObject):
3090
3091         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3092                 "pkg", "pkg_count", "pkg_path", "pretend",
3093                 "scheduler", "settings", "tree", "world_atom")
3094
3095         def execute(self):
3096                 root_config = self.pkg.root_config
3097                 settings = self.settings
3098                 retval = portage.merge(settings["CATEGORY"],
3099                         settings["PF"], settings["D"],
3100                         os.path.join(settings["PORTAGE_BUILDDIR"],
3101                         "build-info"), root_config.root, settings,
3102                         myebuild=settings["EBUILD"],
3103                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3104                         vartree=root_config.trees["vartree"],
3105                         prev_mtimes=self.ldpath_mtimes,
3106                         scheduler=self.scheduler,
3107                         blockers=self.find_blockers)
3108
3109                 if retval == os.EX_OK:
3110                         self.world_atom(self.pkg)
3111                         self._log_success()
3112
3113                 return retval
3114
3115         def _log_success(self):
3116                 pkg = self.pkg
3117                 pkg_count = self.pkg_count
3118                 pkg_path = self.pkg_path
3119                 logger = self.logger
3120                 if "noclean" not in self.settings.features:
3121                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3122                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3123                         logger.log((" === (%s of %s) " + \
3124                                 "Post-Build Cleaning (%s::%s)") % \
3125                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3126                                 short_msg=short_msg)
3127                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3128                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3129
3130 class PackageUninstall(AsynchronousTask):
3131
3132         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3133
3134         def _start(self):
3135                 try:
3136                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3137                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3138                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3139                                 writemsg_level=self._writemsg_level)
3140                 except UninstallFailure, e:
3141                         self.returncode = e.status
3142                 else:
3143                         self.returncode = os.EX_OK
3144                 self.wait()
3145
3146         def _writemsg_level(self, msg, level=0, noiselevel=0):
3147
3148                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3149                 background = self.background
3150
3151                 if log_path is None:
3152                         if not (background and level < logging.WARNING):
3153                                 portage.util.writemsg_level(msg,
3154                                         level=level, noiselevel=noiselevel)
3155                 else:
3156                         if not background:
3157                                 portage.util.writemsg_level(msg,
3158                                         level=level, noiselevel=noiselevel)
3159
3160                         f = open(log_path, 'a')
3161                         try:
3162                                 f.write(msg)
3163                         finally:
3164                                 f.close()
3165
3166 class Binpkg(CompositeTask):
3167
3168         __slots__ = ("find_blockers",
3169                 "ldpath_mtimes", "logger", "opts",
3170                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3171                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3172                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3173
3174         def _writemsg_level(self, msg, level=0, noiselevel=0):
3175
3176                 if not self.background:
3177                         portage.util.writemsg_level(msg,
3178                                 level=level, noiselevel=noiselevel)
3179
3180                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3181                 if  log_path is not None:
3182                         f = open(log_path, 'a')
3183                         try:
3184                                 f.write(msg)
3185                         finally:
3186                                 f.close()
3187
3188         def _start(self):
3189
3190                 pkg = self.pkg
3191                 settings = self.settings
3192                 settings.setcpv(pkg)
3193                 self._tree = "bintree"
3194                 self._bintree = self.pkg.root_config.trees[self._tree]
3195                 self._verify = not self.opts.pretend
3196
3197                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3198                         "portage", pkg.category, pkg.pf)
3199                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3200                         pkg=pkg, settings=settings)
3201                 self._image_dir = os.path.join(dir_path, "image")
3202                 self._infloc = os.path.join(dir_path, "build-info")
3203                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3204                 settings["EBUILD"] = self._ebuild_path
3205                 debug = settings.get("PORTAGE_DEBUG") == "1"
3206                 portage.doebuild_environment(self._ebuild_path, "setup",
3207                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3208                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3209
3210                 # The prefetcher has already completed or it
3211                 # could be running now. If it's running now,
3212                 # wait for it to complete since it holds
3213                 # a lock on the file being fetched. The
3214                 # portage.locks functions are only designed
3215                 # to work between separate processes. Since
3216                 # the lock is held by the current process,
3217                 # use the scheduler and fetcher methods to
3218                 # synchronize with the fetcher.
3219                 prefetcher = self.prefetcher
3220                 if prefetcher is None:
3221                         pass
3222                 elif not prefetcher.isAlive():
3223                         prefetcher.cancel()
3224                 elif prefetcher.poll() is None:
3225
3226                         waiting_msg = ("Fetching '%s' " + \
3227                                 "in the background. " + \
3228                                 "To view fetch progress, run `tail -f " + \
3229                                 "/var/log/emerge-fetch.log` in another " + \
3230                                 "terminal.") % prefetcher.pkg_path
3231                         msg_prefix = colorize("GOOD", " * ")
3232                         from textwrap import wrap
3233                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3234                                 for line in wrap(waiting_msg, 65))
3235                         if not self.background:
3236                                 writemsg(waiting_msg, noiselevel=-1)
3237
3238                         self._current_task = prefetcher
3239                         prefetcher.addExitListener(self._prefetch_exit)
3240                         return
3241
3242                 self._prefetch_exit(prefetcher)
3243
3244         def _prefetch_exit(self, prefetcher):
3245
3246                 pkg = self.pkg
3247                 pkg_count = self.pkg_count
3248                 if not (self.opts.pretend or self.opts.fetchonly):
3249                         self._build_dir.lock()
3250                         try:
3251                                 shutil.rmtree(self._build_dir.dir_path)
3252                         except EnvironmentError, e:
3253                                 if e.errno != errno.ENOENT:
3254                                         raise
3255                                 del e
3256                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3257                 fetcher = BinpkgFetcher(background=self.background,
3258                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3259                         pretend=self.opts.pretend, scheduler=self.scheduler)
3260                 pkg_path = fetcher.pkg_path
3261                 self._pkg_path = pkg_path
3262
3263                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3264
3265                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3266                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3267                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3268                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3269                         self.logger.log(msg, short_msg=short_msg)
3270                         self._start_task(fetcher, self._fetcher_exit)
3271                         return
3272
3273                 self._fetcher_exit(fetcher)
3274
3275         def _fetcher_exit(self, fetcher):
3276
3277                 # The fetcher only has a returncode when
3278                 # --getbinpkg is enabled.
3279                 if fetcher.returncode is not None:
3280                         self._fetched_pkg = True
3281                         if self._default_exit(fetcher) != os.EX_OK:
3282                                 self._unlock_builddir()
3283                                 self.wait()
3284                                 return
3285
3286                 if self.opts.pretend:
3287                         self._current_task = None
3288                         self.returncode = os.EX_OK
3289                         self.wait()
3290                         return
3291
3292                 verifier = None
3293                 if self._verify:
3294                         logfile = None
3295                         if self.background:
3296                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3297                         verifier = BinpkgVerifier(background=self.background,
3298                                 logfile=logfile, pkg=self.pkg)
3299                         self._start_task(verifier, self._verifier_exit)
3300                         return
3301
3302                 self._verifier_exit(verifier)
3303
3304         def _verifier_exit(self, verifier):
3305                 if verifier is not None and \
3306                         self._default_exit(verifier) != os.EX_OK:
3307                         self._unlock_builddir()
3308                         self.wait()
3309                         return
3310
3311                 logger = self.logger
3312                 pkg = self.pkg
3313                 pkg_count = self.pkg_count
3314                 pkg_path = self._pkg_path
3315
3316                 if self._fetched_pkg:
3317                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3318
3319                 if self.opts.fetchonly:
3320                         self._current_task = None
3321                         self.returncode = os.EX_OK
3322                         self.wait()
3323                         return
3324
3325                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3326                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3327                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3328                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3329                 logger.log(msg, short_msg=short_msg)
3330
3331                 phase = "clean"
3332                 settings = self.settings
3333                 ebuild_phase = EbuildPhase(background=self.background,
3334                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3335                         settings=settings, tree=self._tree)
3336
3337                 self._start_task(ebuild_phase, self._clean_exit)
3338
3339         def _clean_exit(self, clean_phase):
3340                 if self._default_exit(clean_phase) != os.EX_OK:
3341                         self._unlock_builddir()
3342                         self.wait()
3343                         return
3344
3345                 dir_path = self._build_dir.dir_path
3346
3347                 try:
3348                         shutil.rmtree(dir_path)
3349                 except (IOError, OSError), e:
3350                         if e.errno != errno.ENOENT:
3351                                 raise
3352                         del e
3353
3354                 infloc = self._infloc
3355                 pkg = self.pkg
3356                 pkg_path = self._pkg_path
3357
3358                 dir_mode = 0755
3359                 for mydir in (dir_path, self._image_dir, infloc):
3360                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3361                                 gid=portage.data.portage_gid, mode=dir_mode)
3362
3363                 # This initializes PORTAGE_LOG_FILE.
3364                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3365                 self._writemsg_level(">>> Extracting info\n")
3366
3367                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3368                 check_missing_metadata = ("CATEGORY", "PF")
3369                 missing_metadata = set()
3370                 for k in check_missing_metadata:
3371                         v = pkg_xpak.getfile(k)
3372                         if not v:
3373                                 missing_metadata.add(k)
3374
3375                 pkg_xpak.unpackinfo(infloc)
3376                 for k in missing_metadata:
3377                         if k == "CATEGORY":
3378                                 v = pkg.category
3379                         elif k == "PF":
3380                                 v = pkg.pf
3381                         else:
3382                                 continue
3383
3384                         f = open(os.path.join(infloc, k), 'wb')
3385                         try:
3386                                 f.write(v + "\n")
3387                         finally:
3388                                 f.close()
3389
3390                 # Store the md5sum in the vdb.
3391                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3392                 try:
3393                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3394                 finally:
3395                         f.close()
3396
3397                 # This gives bashrc users an opportunity to do various things
3398                 # such as remove binary packages after they're installed.
3399                 settings = self.settings
3400                 settings.setcpv(self.pkg)
3401                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3402                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3403
3404                 phase = "setup"
3405                 setup_phase = EbuildPhase(background=self.background,
3406                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3407                         settings=settings, tree=self._tree)
3408
3409                 setup_phase.addExitListener(self._setup_exit)
3410                 self._current_task = setup_phase
3411                 self.scheduler.scheduleSetup(setup_phase)
3412
3413         def _setup_exit(self, setup_phase):
3414                 if self._default_exit(setup_phase) != os.EX_OK:
3415                         self._unlock_builddir()
3416                         self.wait()
3417                         return
3418
3419                 extractor = BinpkgExtractorAsync(background=self.background,
3420                         image_dir=self._image_dir,
3421                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3422                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3423                 self._start_task(extractor, self._extractor_exit)
3424
3425         def _extractor_exit(self, extractor):
3426                 if self._final_exit(extractor) != os.EX_OK:
3427                         self._unlock_builddir()
3428                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3429                                 noiselevel=-1)
3430                 self.wait()
3431
3432         def _unlock_builddir(self):
3433                 if self.opts.pretend or self.opts.fetchonly:
3434                         return
3435                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3436                 self._build_dir.unlock()
3437
3438         def install(self):
3439
3440                 # This gives bashrc users an opportunity to do various things
3441                 # such as remove binary packages after they're installed.
3442                 settings = self.settings
3443                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3444                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3445
3446                 merge = EbuildMerge(find_blockers=self.find_blockers,
3447                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3448                         pkg=self.pkg, pkg_count=self.pkg_count,
3449                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3450                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3451
3452                 try:
3453                         retval = merge.execute()
3454                 finally:
3455                         settings.pop("PORTAGE_BINPKG_FILE", None)
3456                         self._unlock_builddir()
3457                 return retval
3458
3459 class BinpkgFetcher(SpawnProcess):
3460
3461         __slots__ = ("pkg", "pretend",
3462                 "locked", "pkg_path", "_lock_obj")
3463
3464         def __init__(self, **kwargs):
3465                 SpawnProcess.__init__(self, **kwargs)
3466                 pkg = self.pkg
3467                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3468
3469         def _start(self):
3470
3471                 if self.cancelled:
3472                         return
3473
3474                 pkg = self.pkg
3475                 pretend = self.pretend
3476                 bintree = pkg.root_config.trees["bintree"]
3477                 settings = bintree.settings
3478                 use_locks = "distlocks" in settings.features
3479                 pkg_path = self.pkg_path
3480
3481                 if not pretend:
3482                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3483                         if use_locks:
3484                                 self.lock()
3485                 exists = os.path.exists(pkg_path)
3486                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3487                 if not (pretend or resume):
3488                         # Remove existing file or broken symlink.
3489                         try:
3490                                 os.unlink(pkg_path)
3491                         except OSError:
3492                                 pass
3493
3494                 # urljoin doesn't work correctly with
3495                 # unrecognized protocols like sftp
3496                 if bintree._remote_has_index:
3497                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3498                         if not rel_uri:
3499                                 rel_uri = pkg.cpv + ".tbz2"
3500                         uri = bintree._remote_base_uri.rstrip("/") + \
3501                                 "/" + rel_uri.lstrip("/")
3502                 else:
3503                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3504                                 "/" + pkg.pf + ".tbz2"
3505
3506                 if pretend:
3507                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3508                         self.returncode = os.EX_OK
3509                         self.wait()
3510                         return
3511
3512                 protocol = urlparse.urlparse(uri)[0]
3513                 fcmd_prefix = "FETCHCOMMAND"
3514                 if resume:
3515                         fcmd_prefix = "RESUMECOMMAND"
3516                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3517                 if not fcmd:
3518                         fcmd = settings.get(fcmd_prefix)
3519
3520                 fcmd_vars = {
3521                         "DISTDIR" : os.path.dirname(pkg_path),
3522                         "URI"     : uri,
3523                         "FILE"    : os.path.basename(pkg_path)
3524                 }
3525
3526                 fetch_env = dict(settings.iteritems())
3527                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3528                         for x in shlex.split(fcmd)]
3529
3530                 if self.fd_pipes is None:
3531                         self.fd_pipes = {}
3532                 fd_pipes = self.fd_pipes
3533
3534                 # Redirect all output to stdout since some fetchers like
3535                 # wget pollute stderr (if portage detects a problem then it
3536                 # can send it's own message to stderr).
3537                 fd_pipes.setdefault(0, sys.stdin.fileno())
3538                 fd_pipes.setdefault(1, sys.stdout.fileno())
3539                 fd_pipes.setdefault(2, sys.stdout.fileno())
3540
3541                 self.args = fetch_args
3542                 self.env = fetch_env
3543                 SpawnProcess._start(self)
3544
3545         def _set_returncode(self, wait_retval):
3546                 SpawnProcess._set_returncode(self, wait_retval)
3547                 if self.returncode == os.EX_OK:
3548                         # If possible, update the mtime to match the remote package if
3549                         # the fetcher didn't already do it automatically.
3550                         bintree = self.pkg.root_config.trees["bintree"]
3551                         if bintree._remote_has_index:
3552                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3553                                 if remote_mtime is not None:
3554                                         try:
3555                                                 remote_mtime = long(remote_mtime)
3556                                         except ValueError:
3557                                                 pass
3558                                         else:
3559                                                 try:
3560                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3561                                                 except OSError:
3562                                                         pass
3563                                                 else:
3564                                                         if remote_mtime != local_mtime:
3565                                                                 try:
3566                                                                         os.utime(self.pkg_path,
3567                                                                                 (remote_mtime, remote_mtime))
3568                                                                 except OSError:
3569                                                                         pass
3570
3571                 if self.locked:
3572                         self.unlock()
3573
3574         def lock(self):
3575                 """
3576                 This raises an AlreadyLocked exception if lock() is called
3577                 while a lock is already held. In order to avoid this, call
3578                 unlock() or check whether the "locked" attribute is True
3579                 or False before calling lock().
3580                 """
3581                 if self._lock_obj is not None:
3582                         raise self.AlreadyLocked((self._lock_obj,))
3583
3584                 self._lock_obj = portage.locks.lockfile(
3585                         self.pkg_path, wantnewlockfile=1)
3586                 self.locked = True
3587
3588         class AlreadyLocked(portage.exception.PortageException):
3589                 pass
3590
3591         def unlock(self):
3592                 if self._lock_obj is None:
3593                         return
3594                 portage.locks.unlockfile(self._lock_obj)
3595                 self._lock_obj = None
3596                 self.locked = False
3597
3598 class BinpkgVerifier(AsynchronousTask):
3599         __slots__ = ("logfile", "pkg",)
3600
3601         def _start(self):
3602                 """
3603                 Note: Unlike a normal AsynchronousTask.start() method,
3604                 this one does all work is synchronously. The returncode
3605                 attribute will be set before it returns.
3606                 """
3607
3608                 pkg = self.pkg
3609                 root_config = pkg.root_config
3610                 bintree = root_config.trees["bintree"]
3611                 rval = os.EX_OK
3612                 stdout_orig = sys.stdout
3613                 stderr_orig = sys.stderr
3614                 log_file = None
3615                 if self.background and self.logfile is not None:
3616                         log_file = open(self.logfile, 'a')
3617                 try:
3618                         if log_file is not None:
3619                                 sys.stdout = log_file
3620                                 sys.stderr = log_file
3621                         try:
3622                                 bintree.digestCheck(pkg)
3623                         except portage.exception.FileNotFound:
3624                                 writemsg("!!! Fetching Binary failed " + \
3625                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3626                                 rval = 1
3627                         except portage.exception.DigestException, e:
3628                                 writemsg("\n!!! Digest verification failed:\n",
3629                                         noiselevel=-1)
3630                                 writemsg("!!! %s\n" % e.value[0],
3631                                         noiselevel=-1)
3632                                 writemsg("!!! Reason: %s\n" % e.value[1],
3633                                         noiselevel=-1)
3634                                 writemsg("!!! Got: %s\n" % e.value[2],
3635                                         noiselevel=-1)
3636                                 writemsg("!!! Expected: %s\n" % e.value[3],
3637                                         noiselevel=-1)
3638                                 rval = 1
3639                         if rval != os.EX_OK:
3640                                 pkg_path = bintree.getname(pkg.cpv)
3641                                 head, tail = os.path.split(pkg_path)
3642                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3643                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3644                                         noiselevel=-1)
3645                 finally:
3646                         sys.stdout = stdout_orig
3647                         sys.stderr = stderr_orig
3648                         if log_file is not None:
3649                                 log_file.close()
3650
3651                 self.returncode = rval
3652                 self.wait()
3653
3654 class BinpkgPrefetcher(CompositeTask):
3655
3656         __slots__ = ("pkg",) + \
3657                 ("pkg_path", "_bintree",)
3658
3659         def _start(self):
3660                 self._bintree = self.pkg.root_config.trees["bintree"]
3661                 fetcher = BinpkgFetcher(background=self.background,
3662                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3663                         scheduler=self.scheduler)
3664                 self.pkg_path = fetcher.pkg_path
3665                 self._start_task(fetcher, self._fetcher_exit)
3666
3667         def _fetcher_exit(self, fetcher):
3668
3669                 if self._default_exit(fetcher) != os.EX_OK:
3670                         self.wait()
3671                         return
3672
3673                 verifier = BinpkgVerifier(background=self.background,
3674                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3675                 self._start_task(verifier, self._verifier_exit)
3676
3677         def _verifier_exit(self, verifier):
3678                 if self._default_exit(verifier) != os.EX_OK:
3679                         self.wait()
3680                         return
3681
3682                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3683
3684                 self._current_task = None
3685                 self.returncode = os.EX_OK
3686                 self.wait()
3687
3688 class BinpkgExtractorAsync(SpawnProcess):
3689
3690         __slots__ = ("image_dir", "pkg", "pkg_path")
3691
3692         _shell_binary = portage.const.BASH_BINARY
3693
3694         def _start(self):
3695                 self.args = [self._shell_binary, "-c",
3696                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3697                         (portage._shell_quote(self.pkg_path),
3698                         portage._shell_quote(self.image_dir))]
3699
3700                 self.env = self.pkg.root_config.settings.environ()
3701                 SpawnProcess._start(self)
3702
3703 class MergeListItem(CompositeTask):
3704
3705         """
3706         TODO: For parallel scheduling, everything here needs asynchronous
3707         execution support (start, poll, and wait methods).
3708         """
3709
3710         __slots__ = ("args_set",
3711                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3712                 "find_blockers", "logger", "mtimedb", "pkg",
3713                 "pkg_count", "pkg_to_replace", "prefetcher",
3714                 "settings", "statusMessage", "world_atom") + \
3715                 ("_install_task",)
3716
3717         def _start(self):
3718
3719                 pkg = self.pkg
3720                 build_opts = self.build_opts
3721
3722                 if pkg.installed:
3723                         # uninstall,  executed by self.merge()
3724                         self.returncode = os.EX_OK
3725                         self.wait()
3726                         return
3727
3728                 args_set = self.args_set
3729                 find_blockers = self.find_blockers
3730                 logger = self.logger
3731                 mtimedb = self.mtimedb
3732                 pkg_count = self.pkg_count
3733                 scheduler = self.scheduler
3734                 settings = self.settings
3735                 world_atom = self.world_atom
3736                 ldpath_mtimes = mtimedb["ldpath"]
3737
3738                 action_desc = "Emerging"
3739                 preposition = "for"
3740                 if pkg.type_name == "binary":
3741                         action_desc += " binary"
3742
3743                 if build_opts.fetchonly:
3744                         action_desc = "Fetching"
3745
3746                 msg = "%s (%s of %s) %s" % \
3747                         (action_desc,
3748                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3749                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3750                         colorize("GOOD", pkg.cpv))
3751
3752                 if pkg.root != "/":
3753                         msg += " %s %s" % (preposition, pkg.root)
3754
3755                 if not build_opts.pretend:
3756                         self.statusMessage(msg)
3757                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3758                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3759
3760                 if pkg.type_name == "ebuild":
3761
3762                         build = EbuildBuild(args_set=args_set,
3763                                 background=self.background,
3764                                 config_pool=self.config_pool,
3765                                 find_blockers=find_blockers,
3766                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3767                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3768                                 prefetcher=self.prefetcher, scheduler=scheduler,
3769                                 settings=settings, world_atom=world_atom)
3770
3771                         self._install_task = build
3772                         self._start_task(build, self._default_final_exit)
3773                         return
3774
3775                 elif pkg.type_name == "binary":
3776
3777                         binpkg = Binpkg(background=self.background,
3778                                 find_blockers=find_blockers,
3779                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3780                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3781                                 prefetcher=self.prefetcher, settings=settings,
3782                                 scheduler=scheduler, world_atom=world_atom)
3783
3784                         self._install_task = binpkg
3785                         self._start_task(binpkg, self._default_final_exit)
3786                         return
3787
3788         def _poll(self):
3789                 self._install_task.poll()
3790                 return self.returncode
3791
3792         def _wait(self):
3793                 self._install_task.wait()
3794                 return self.returncode
3795
3796         def merge(self):
3797
3798                 pkg = self.pkg
3799                 build_opts = self.build_opts
3800                 find_blockers = self.find_blockers
3801                 logger = self.logger
3802                 mtimedb = self.mtimedb
3803                 pkg_count = self.pkg_count
3804                 prefetcher = self.prefetcher
3805                 scheduler = self.scheduler
3806                 settings = self.settings
3807                 world_atom = self.world_atom
3808                 ldpath_mtimes = mtimedb["ldpath"]
3809
3810                 if pkg.installed:
3811                         if not (build_opts.buildpkgonly or \
3812                                 build_opts.fetchonly or build_opts.pretend):
3813
3814                                 uninstall = PackageUninstall(background=self.background,
3815                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3816                                         pkg=pkg, scheduler=scheduler, settings=settings)
3817
3818                                 uninstall.start()
3819                                 retval = uninstall.wait()
3820                                 if retval != os.EX_OK:
3821                                         return retval
3822                         return os.EX_OK
3823
3824                 if build_opts.fetchonly or \
3825                         build_opts.buildpkgonly:
3826                         return self.returncode
3827
3828                 retval = self._install_task.install()
3829                 return retval
3830
3831 class PackageMerge(AsynchronousTask):
3832         """
3833         TODO: Implement asynchronous merge so that the scheduler can
3834         run while a merge is executing.
3835         """
3836
3837         __slots__ = ("merge",)
3838
3839         def _start(self):
3840
3841                 pkg = self.merge.pkg
3842                 pkg_count = self.merge.pkg_count
3843
3844                 if pkg.installed:
3845                         action_desc = "Uninstalling"
3846                         preposition = "from"
3847                 else:
3848                         action_desc = "Installing"
3849                         preposition = "to"
3850
3851                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3852
3853                 if pkg.root != "/":
3854                         msg += " %s %s" % (preposition, pkg.root)
3855
3856                 if not self.merge.build_opts.fetchonly and \
3857                         not self.merge.build_opts.pretend and \
3858                         not self.merge.build_opts.buildpkgonly:
3859                         self.merge.statusMessage(msg)
3860
3861                 self.returncode = self.merge.merge()
3862                 self.wait()
3863
3864 class DependencyArg(object):
3865         def __init__(self, arg=None, root_config=None):
3866                 self.arg = arg
3867                 self.root_config = root_config
3868
3869         def __str__(self):
3870                 return str(self.arg)
3871
3872 class AtomArg(DependencyArg):
3873         def __init__(self, atom=None, **kwargs):
3874                 DependencyArg.__init__(self, **kwargs)
3875                 self.atom = atom
3876                 if not isinstance(self.atom, portage.dep.Atom):
3877                         self.atom = portage.dep.Atom(self.atom)
3878                 self.set = (self.atom, )
3879
3880 class PackageArg(DependencyArg):
3881         def __init__(self, package=None, **kwargs):
3882                 DependencyArg.__init__(self, **kwargs)
3883                 self.package = package
3884                 self.atom = portage.dep.Atom("=" + package.cpv)
3885                 self.set = (self.atom, )
3886
3887 class SetArg(DependencyArg):
3888         def __init__(self, set=None, **kwargs):
3889                 DependencyArg.__init__(self, **kwargs)
3890                 self.set = set
3891                 self.name = self.arg[len(SETPREFIX):]
3892
3893 class Dependency(SlotObject):
3894         __slots__ = ("atom", "blocker", "depth",
3895                 "parent", "onlydeps", "priority", "root")
3896         def __init__(self, **kwargs):
3897                 SlotObject.__init__(self, **kwargs)
3898                 if self.priority is None:
3899                         self.priority = DepPriority()
3900                 if self.depth is None:
3901                         self.depth = 0
3902
3903 class BlockerCache(DictMixin):
3904         """This caches blockers of installed packages so that dep_check does not
3905         have to be done for every single installed package on every invocation of
3906         emerge.  The cache is invalidated whenever it is detected that something
3907         has changed that might alter the results of dep_check() calls:
3908                 1) the set of installed packages (including COUNTER) has changed
3909                 2) the old-style virtuals have changed
3910         """
3911
3912         # Number of uncached packages to trigger cache update, since
3913         # it's wasteful to update it for every vdb change.
3914         _cache_threshold = 5
3915
3916         class BlockerData(object):
3917
3918                 __slots__ = ("__weakref__", "atoms", "counter")
3919
3920                 def __init__(self, counter, atoms):
3921                         self.counter = counter
3922                         self.atoms = atoms
3923
3924         def __init__(self, myroot, vardb):
3925                 self._vardb = vardb
3926                 self._virtuals = vardb.settings.getvirtuals()
3927                 self._cache_filename = os.path.join(myroot,
3928                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3929                 self._cache_version = "1"
3930                 self._cache_data = None
3931                 self._modified = set()
3932                 self._load()
3933
3934         def _load(self):
3935                 try:
3936                         f = open(self._cache_filename)
3937                         mypickle = pickle.Unpickler(f)
3938                         mypickle.find_global = None
3939                         self._cache_data = mypickle.load()
3940                         f.close()
3941                         del f
3942                 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3943                         if isinstance(e, pickle.UnpicklingError):
3944                                 writemsg("!!! Error loading '%s': %s\n" % \
3945                                         (self._cache_filename, str(e)), noiselevel=-1)
3946                         del e
3947
3948                 cache_valid = self._cache_data and \
3949                         isinstance(self._cache_data, dict) and \
3950                         self._cache_data.get("version") == self._cache_version and \
3951                         isinstance(self._cache_data.get("blockers"), dict)
3952                 if cache_valid:
3953                         # Validate all the atoms and counters so that
3954                         # corruption is detected as soon as possible.
3955                         invalid_items = set()
3956                         for k, v in self._cache_data["blockers"].iteritems():
3957                                 if not isinstance(k, basestring):
3958                                         invalid_items.add(k)
3959                                         continue
3960                                 try:
3961                                         if portage.catpkgsplit(k) is None:
3962                                                 invalid_items.add(k)
3963                                                 continue
3964                                 except portage.exception.InvalidData:
3965                                         invalid_items.add(k)
3966                                         continue
3967                                 if not isinstance(v, tuple) or \
3968                                         len(v) != 2:
3969                                         invalid_items.add(k)
3970                                         continue
3971                                 counter, atoms = v
3972                                 if not isinstance(counter, (int, long)):
3973                                         invalid_items.add(k)
3974                                         continue
3975                                 if not isinstance(atoms, (list, tuple)):
3976                                         invalid_items.add(k)
3977                                         continue
3978                                 invalid_atom = False
3979                                 for atom in atoms:
3980                                         if not isinstance(atom, basestring):
3981                                                 invalid_atom = True
3982                                                 break
3983                                         if atom[:1] != "!" or \
3984                                                 not portage.isvalidatom(
3985                                                 atom, allow_blockers=True):
3986                                                 invalid_atom = True
3987                                                 break
3988                                 if invalid_atom:
3989                                         invalid_items.add(k)
3990                                         continue
3991
3992                         for k in invalid_items:
3993                                 del self._cache_data["blockers"][k]
3994                         if not self._cache_data["blockers"]:
3995                                 cache_valid = False
3996
3997                 if not cache_valid:
3998                         self._cache_data = {"version":self._cache_version}
3999                         self._cache_data["blockers"] = {}
4000                         self._cache_data["virtuals"] = self._virtuals
4001                 self._modified.clear()
4002
4003         def flush(self):
4004                 """If the current user has permission and the internal blocker cache
4005                 been updated, save it to disk and mark it unmodified.  This is called
4006                 by emerge after it has proccessed blockers for all installed packages.
4007                 Currently, the cache is only written if the user has superuser
4008                 privileges (since that's required to obtain a lock), but all users
4009                 have read access and benefit from faster blocker lookups (as long as
4010                 the entire cache is still valid).  The cache is stored as a pickled
4011                 dict object with the following format:
4012
4013                 {
4014                         version : "1",
4015                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4016                         "virtuals" : vardb.settings.getvirtuals()
4017                 }
4018                 """
4019                 if len(self._modified) >= self._cache_threshold and \
4020                         secpass >= 2:
4021                         try:
4022                                 f = portage.util.atomic_ofstream(self._cache_filename)
4023                                 pickle.dump(self._cache_data, f, -1)
4024                                 f.close()
4025                                 portage.util.apply_secpass_permissions(
4026                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4027                         except (IOError, OSError), e:
4028                                 pass
4029                         self._modified.clear()
4030
4031         def __setitem__(self, cpv, blocker_data):
4032                 """
4033                 Update the cache and mark it as modified for a future call to
4034                 self.flush().
4035
4036                 @param cpv: Package for which to cache blockers.
4037                 @type cpv: String
4038                 @param blocker_data: An object with counter and atoms attributes.
4039                 @type blocker_data: BlockerData
4040                 """
4041                 self._cache_data["blockers"][cpv] = \
4042                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4043                 self._modified.add(cpv)
4044
4045         def __iter__(self):
4046                 if self._cache_data is None:
4047                         # triggered by python-trace
4048                         return iter([])
4049                 return iter(self._cache_data["blockers"])
4050
4051         def __delitem__(self, cpv):
4052                 del self._cache_data["blockers"][cpv]
4053
4054         def __getitem__(self, cpv):
4055                 """
4056                 @rtype: BlockerData
4057                 @returns: An object with counter and atoms attributes.
4058                 """
4059                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4060
4061         def keys(self):
4062                 """This needs to be implemented so that self.__repr__() doesn't raise
4063                 an AttributeError."""
4064                 return list(self)
4065
4066 class BlockerDB(object):
4067
4068         def __init__(self, root_config):
4069                 self._root_config = root_config
4070                 self._vartree = root_config.trees["vartree"]
4071                 self._portdb = root_config.trees["porttree"].dbapi
4072
4073                 self._dep_check_trees = None
4074                 self._fake_vartree = None
4075
4076         def _get_fake_vartree(self, acquire_lock=0):
4077                 fake_vartree = self._fake_vartree
4078                 if fake_vartree is None:
4079                         fake_vartree = FakeVartree(self._root_config,
4080                                 acquire_lock=acquire_lock)
4081                         self._fake_vartree = fake_vartree
4082                         self._dep_check_trees = { self._vartree.root : {
4083                                 "porttree"    :  fake_vartree,
4084                                 "vartree"     :  fake_vartree,
4085                         }}
4086                 else:
4087                         fake_vartree.sync(acquire_lock=acquire_lock)
4088                 return fake_vartree
4089
4090         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4091                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4092                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4093                 settings = self._vartree.settings
4094                 stale_cache = set(blocker_cache)
4095                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4096                 dep_check_trees = self._dep_check_trees
4097                 vardb = fake_vartree.dbapi
4098                 installed_pkgs = list(vardb)
4099
4100                 for inst_pkg in installed_pkgs:
4101                         stale_cache.discard(inst_pkg.cpv)
4102                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4103                         if cached_blockers is not None and \
4104                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4105                                 cached_blockers = None
4106                         if cached_blockers is not None:
4107                                 blocker_atoms = cached_blockers.atoms
4108                         else:
4109                                 # Use aux_get() to trigger FakeVartree global
4110                                 # updates on *DEPEND when appropriate.
4111                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4112                                 try:
4113                                         portage.dep._dep_check_strict = False
4114                                         success, atoms = portage.dep_check(depstr,
4115                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4116                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4117                                 finally:
4118                                         portage.dep._dep_check_strict = True
4119                                 if not success:
4120                                         pkg_location = os.path.join(inst_pkg.root,
4121                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4122                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4123                                                 (pkg_location, atoms), noiselevel=-1)
4124                                         continue
4125
4126                                 blocker_atoms = [atom for atom in atoms \
4127                                         if atom.startswith("!")]
4128                                 blocker_atoms.sort()
4129                                 counter = long(inst_pkg.metadata["COUNTER"])
4130                                 blocker_cache[inst_pkg.cpv] = \
4131                                         blocker_cache.BlockerData(counter, blocker_atoms)
4132                 for cpv in stale_cache:
4133                         del blocker_cache[cpv]
4134                 blocker_cache.flush()
4135
4136                 blocker_parents = digraph()
4137                 blocker_atoms = []
4138                 for pkg in installed_pkgs:
4139                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4140                                 blocker_atom = blocker_atom.lstrip("!")
4141                                 blocker_atoms.append(blocker_atom)
4142                                 blocker_parents.add(blocker_atom, pkg)
4143
4144                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4145                 blocking_pkgs = set()
4146                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4147                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4148
4149                 # Check for blockers in the other direction.
4150                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4151                 try:
4152                         portage.dep._dep_check_strict = False
4153                         success, atoms = portage.dep_check(depstr,
4154                                 vardb, settings, myuse=new_pkg.use.enabled,
4155                                 trees=dep_check_trees, myroot=new_pkg.root)
4156                 finally:
4157                         portage.dep._dep_check_strict = True
4158                 if not success:
4159                         # We should never get this far with invalid deps.
4160                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4161                         assert False
4162
4163                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4164                         if atom[:1] == "!"]
4165                 if blocker_atoms:
4166                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4167                         for inst_pkg in installed_pkgs:
4168                                 try:
4169                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4170                                 except (portage.exception.InvalidDependString, StopIteration):
4171                                         continue
4172                                 blocking_pkgs.add(inst_pkg)
4173
4174                 return blocking_pkgs
4175
4176 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4177
4178         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4179                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4180         p_type, p_root, p_key, p_status = parent_node
4181         msg = []
4182         if p_status == "nomerge":
4183                 category, pf = portage.catsplit(p_key)
4184                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4185                 msg.append("Portage is unable to process the dependencies of the ")
4186                 msg.append("'%s' package. " % p_key)
4187                 msg.append("In order to correct this problem, the package ")
4188                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4189                 msg.append("As a temporary workaround, the --nodeps option can ")
4190                 msg.append("be used to ignore all dependencies.  For reference, ")
4191                 msg.append("the problematic dependencies can be found in the ")
4192                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4193         else:
4194                 msg.append("This package can not be installed. ")
4195                 msg.append("Please notify the '%s' package maintainer " % p_key)
4196                 msg.append("about this problem.")
4197
4198         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4199         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4200
4201 class PackageVirtualDbapi(portage.dbapi):
4202         """
4203         A dbapi-like interface class that represents the state of the installed
4204         package database as new packages are installed, replacing any packages
4205         that previously existed in the same slot. The main difference between
4206         this class and fakedbapi is that this one uses Package instances
4207         internally (passed in via cpv_inject() and cpv_remove() calls).
4208         """
4209         def __init__(self, settings):
4210                 portage.dbapi.__init__(self)
4211                 self.settings = settings
4212                 self._match_cache = {}
4213                 self._cp_map = {}
4214                 self._cpv_map = {}
4215
4216         def clear(self):
4217                 """
4218                 Remove all packages.
4219                 """
4220                 if self._cpv_map:
4221                         self._clear_cache()
4222                         self._cp_map.clear()
4223                         self._cpv_map.clear()
4224
4225         def copy(self):
4226                 obj = PackageVirtualDbapi(self.settings)
4227                 obj._match_cache = self._match_cache.copy()
4228                 obj._cp_map = self._cp_map.copy()
4229                 for k, v in obj._cp_map.iteritems():
4230                         obj._cp_map[k] = v[:]
4231                 obj._cpv_map = self._cpv_map.copy()
4232                 return obj
4233
4234         def __iter__(self):
4235                 return self._cpv_map.itervalues()
4236
4237         def __contains__(self, item):
4238                 existing = self._cpv_map.get(item.cpv)
4239                 if existing is not None and \
4240                         existing == item:
4241                         return True
4242                 return False
4243
4244         def get(self, item, default=None):
4245                 cpv = getattr(item, "cpv", None)
4246                 if cpv is None:
4247                         if len(item) != 4:
4248                                 return default
4249                         type_name, root, cpv, operation = item
4250
4251                 existing = self._cpv_map.get(cpv)
4252                 if existing is not None and \
4253                         existing == item:
4254                         return existing
4255                 return default
4256
4257         def match_pkgs(self, atom):
4258                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4259
4260         def _clear_cache(self):
4261                 if self._categories is not None:
4262                         self._categories = None
4263                 if self._match_cache:
4264                         self._match_cache = {}
4265
4266         def match(self, origdep, use_cache=1):
4267                 result = self._match_cache.get(origdep)
4268                 if result is not None:
4269                         return result[:]
4270                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4271                 self._match_cache[origdep] = result
4272                 return result[:]
4273
4274         def cpv_exists(self, cpv):
4275                 return cpv in self._cpv_map
4276
4277         def cp_list(self, mycp, use_cache=1):
4278                 cachelist = self._match_cache.get(mycp)
4279                 # cp_list() doesn't expand old-style virtuals
4280                 if cachelist and cachelist[0].startswith(mycp):
4281                         return cachelist[:]
4282                 cpv_list = self._cp_map.get(mycp)
4283                 if cpv_list is None:
4284                         cpv_list = []
4285                 else:
4286                         cpv_list = [pkg.cpv for pkg in cpv_list]
4287                 self._cpv_sort_ascending(cpv_list)
4288                 if not (not cpv_list and mycp.startswith("virtual/")):
4289                         self._match_cache[mycp] = cpv_list
4290                 return cpv_list[:]
4291
4292         def cp_all(self):
4293                 return list(self._cp_map)
4294
4295         def cpv_all(self):
4296                 return list(self._cpv_map)
4297
4298         def cpv_inject(self, pkg):
4299                 cp_list = self._cp_map.get(pkg.cp)
4300                 if cp_list is None:
4301                         cp_list = []
4302                         self._cp_map[pkg.cp] = cp_list
4303                 e_pkg = self._cpv_map.get(pkg.cpv)
4304                 if e_pkg is not None:
4305                         if e_pkg == pkg:
4306                                 return
4307                         self.cpv_remove(e_pkg)
4308                 for e_pkg in cp_list:
4309                         if e_pkg.slot_atom == pkg.slot_atom:
4310                                 if e_pkg == pkg:
4311                                         return
4312                                 self.cpv_remove(e_pkg)
4313                                 break
4314                 cp_list.append(pkg)
4315                 self._cpv_map[pkg.cpv] = pkg
4316                 self._clear_cache()
4317
4318         def cpv_remove(self, pkg):
4319                 old_pkg = self._cpv_map.get(pkg.cpv)
4320                 if old_pkg != pkg:
4321                         raise KeyError(pkg)
4322                 self._cp_map[pkg.cp].remove(pkg)
4323                 del self._cpv_map[pkg.cpv]
4324                 self._clear_cache()
4325
4326         def aux_get(self, cpv, wants):
4327                 metadata = self._cpv_map[cpv].metadata
4328                 return [metadata.get(x, "") for x in wants]
4329
4330         def aux_update(self, cpv, values):
4331                 self._cpv_map[cpv].metadata.update(values)
4332                 self._clear_cache()
4333
4334 class depgraph(object):
4335
4336         pkg_tree_map = RootConfig.pkg_tree_map
4337
4338         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4339
4340         def __init__(self, settings, trees, myopts, myparams, spinner):
4341                 self.settings = settings
4342                 self.target_root = settings["ROOT"]
4343                 self.myopts = myopts
4344                 self.myparams = myparams
4345                 self.edebug = 0
4346                 if settings.get("PORTAGE_DEBUG", "") == "1":
4347                         self.edebug = 1
4348                 self.spinner = spinner
4349                 self._running_root = trees["/"]["root_config"]
4350                 self._opts_no_restart = Scheduler._opts_no_restart
4351                 self.pkgsettings = {}
4352                 # Maps slot atom to package for each Package added to the graph.
4353                 self._slot_pkg_map = {}
4354                 # Maps nodes to the reasons they were selected for reinstallation.
4355                 self._reinstall_nodes = {}
4356                 self.mydbapi = {}
4357                 self.trees = {}
4358                 self._trees_orig = trees
4359                 self.roots = {}
4360                 # Contains a filtered view of preferred packages that are selected
4361                 # from available repositories.
4362                 self._filtered_trees = {}
4363                 # Contains installed packages and new packages that have been added
4364                 # to the graph.
4365                 self._graph_trees = {}
4366                 # All Package instances
4367                 self._pkg_cache = {}
4368                 for myroot in trees:
4369                         self.trees[myroot] = {}
4370                         # Create a RootConfig instance that references
4371                         # the FakeVartree instead of the real one.
4372                         self.roots[myroot] = RootConfig(
4373                                 trees[myroot]["vartree"].settings,
4374                                 self.trees[myroot],
4375                                 trees[myroot]["root_config"].setconfig)
4376                         for tree in ("porttree", "bintree"):
4377                                 self.trees[myroot][tree] = trees[myroot][tree]
4378                         self.trees[myroot]["vartree"] = \
4379                                 FakeVartree(trees[myroot]["root_config"],
4380                                         pkg_cache=self._pkg_cache)
4381                         self.pkgsettings[myroot] = portage.config(
4382                                 clone=self.trees[myroot]["vartree"].settings)
4383                         self._slot_pkg_map[myroot] = {}
4384                         vardb = self.trees[myroot]["vartree"].dbapi
4385                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4386                                 "--buildpkgonly" not in self.myopts
4387                         # This fakedbapi instance will model the state that the vdb will
4388                         # have after new packages have been installed.
4389                         fakedb = PackageVirtualDbapi(vardb.settings)
4390                         if preload_installed_pkgs:
4391                                 for pkg in vardb:
4392                                         self.spinner.update()
4393                                         # This triggers metadata updates via FakeVartree.
4394                                         vardb.aux_get(pkg.cpv, [])
4395                                         fakedb.cpv_inject(pkg)
4396
4397                         # Now that the vardb state is cached in our FakeVartree,
4398                         # we won't be needing the real vartree cache for awhile.
4399                         # To make some room on the heap, clear the vardbapi
4400                         # caches.
4401                         trees[myroot]["vartree"].dbapi._clear_cache()
4402                         gc.collect()
4403
4404                         self.mydbapi[myroot] = fakedb
4405                         def graph_tree():
4406                                 pass
4407                         graph_tree.dbapi = fakedb
4408                         self._graph_trees[myroot] = {}
4409                         self._filtered_trees[myroot] = {}
4410                         # Substitute the graph tree for the vartree in dep_check() since we
4411                         # want atom selections to be consistent with package selections
4412                         # have already been made.
4413                         self._graph_trees[myroot]["porttree"]   = graph_tree
4414                         self._graph_trees[myroot]["vartree"]    = graph_tree
4415                         def filtered_tree():
4416                                 pass
4417                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4418                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4419
4420                         # Passing in graph_tree as the vartree here could lead to better
4421                         # atom selections in some cases by causing atoms for packages that
4422                         # have been added to the graph to be preferred over other choices.
4423                         # However, it can trigger atom selections that result in
4424                         # unresolvable direct circular dependencies. For example, this
4425                         # happens with gwydion-dylan which depends on either itself or
4426                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4427                         # gwydion-dylan-bin needs to be selected in order to avoid a
4428                         # an unresolvable direct circular dependency.
4429                         #
4430                         # To solve the problem described above, pass in "graph_db" so that
4431                         # packages that have been added to the graph are distinguishable
4432                         # from other available packages and installed packages. Also, pass
4433                         # the parent package into self._select_atoms() calls so that
4434                         # unresolvable direct circular dependencies can be detected and
4435                         # avoided when possible.
4436                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4437                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4438
4439                         dbs = []
4440                         portdb = self.trees[myroot]["porttree"].dbapi
4441                         bindb  = self.trees[myroot]["bintree"].dbapi
4442                         vardb  = self.trees[myroot]["vartree"].dbapi
4443                         #               (db, pkg_type, built, installed, db_keys)
4444                         if "--usepkgonly" not in self.myopts:
4445                                 db_keys = list(portdb._aux_cache_keys)
4446                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4447                         if "--usepkg" in self.myopts:
4448                                 db_keys = list(bindb._aux_cache_keys)
4449                                 dbs.append((bindb,  "binary", True, False, db_keys))
4450                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4451                         dbs.append((vardb, "installed", True, True, db_keys))
4452                         self._filtered_trees[myroot]["dbs"] = dbs
4453                         if "--usepkg" in self.myopts:
4454                                 self.trees[myroot]["bintree"].populate(
4455                                         "--getbinpkg" in self.myopts,
4456                                         "--getbinpkgonly" in self.myopts)
4457                 del trees
4458
4459                 self.digraph=portage.digraph()
4460                 # contains all sets added to the graph
4461                 self._sets = {}
4462                 # contains atoms given as arguments
4463                 self._sets["args"] = InternalPackageSet()
4464                 # contains all atoms from all sets added to the graph, including
4465                 # atoms given as arguments
4466                 self._set_atoms = InternalPackageSet()
4467                 self._atom_arg_map = {}
4468                 # contains all nodes pulled in by self._set_atoms
4469                 self._set_nodes = set()
4470                 # Contains only Blocker -> Uninstall edges
4471                 self._blocker_uninstalls = digraph()
4472                 # Contains only Package -> Blocker edges
4473                 self._blocker_parents = digraph()
4474                 # Contains only irrelevant Package -> Blocker edges
4475                 self._irrelevant_blockers = digraph()
4476                 # Contains only unsolvable Package -> Blocker edges
4477                 self._unsolvable_blockers = digraph()
4478                 self._slot_collision_info = {}
4479                 # Slot collision nodes are not allowed to block other packages since
4480                 # blocker validation is only able to account for one package per slot.
4481                 self._slot_collision_nodes = set()
4482                 self._parent_atoms = {}
4483                 self._slot_conflict_parent_atoms = set()
4484                 self._serialized_tasks_cache = None
4485                 self._scheduler_graph = None
4486                 self._displayed_list = None
4487                 self._pprovided_args = []
4488                 self._missing_args = []
4489                 self._masked_installed = set()
4490                 self._unsatisfied_deps_for_display = []
4491                 self._unsatisfied_blockers_for_display = None
4492                 self._circular_deps_for_display = None
4493                 self._dep_stack = []
4494                 self._unsatisfied_deps = []
4495                 self._initially_unsatisfied_deps = []
4496                 self._ignored_deps = []
4497                 self._required_set_names = set(["system", "world"])
4498                 self._select_atoms = self._select_atoms_highest_available
4499                 self._select_package = self._select_pkg_highest_available
4500                 self._highest_pkg_cache = {}
4501
4502         def _show_slot_collision_notice(self):
4503                 """Show an informational message advising the user to mask one of the
4504                 the packages. In some cases it may be possible to resolve this
4505                 automatically, but support for backtracking (removal nodes that have
4506                 already been selected) will be required in order to handle all possible
4507                 cases.
4508                 """
4509
4510                 if not self._slot_collision_info:
4511                         return
4512
4513                 self._show_merge_list()
4514
4515                 msg = []
4516                 msg.append("\n!!! Multiple package instances within a single " + \
4517                         "package slot have been pulled\n")
4518                 msg.append("!!! into the dependency graph, resulting" + \
4519                         " in a slot conflict:\n\n")
4520                 indent = "  "
4521                 # Max number of parents shown, to avoid flooding the display.
4522                 max_parents = 3
4523                 explanation_columns = 70
4524                 explanations = 0
4525                 for (slot_atom, root), slot_nodes \
4526                         in self._slot_collision_info.iteritems():
4527                         msg.append(str(slot_atom))
4528                         msg.append("\n\n")
4529
4530                         for node in slot_nodes:
4531                                 msg.append(indent)
4532                                 msg.append(str(node))
4533                                 parent_atoms = self._parent_atoms.get(node)
4534                                 if parent_atoms:
4535                                         pruned_list = set()
4536                                         # Prefer conflict atoms over others.
4537                                         for parent_atom in parent_atoms:
4538                                                 if len(pruned_list) >= max_parents:
4539                                                         break
4540                                                 if parent_atom in self._slot_conflict_parent_atoms:
4541                                                         pruned_list.add(parent_atom)
4542
4543                                         # If this package was pulled in by conflict atoms then
4544                                         # show those alone since those are the most interesting.
4545                                         if not pruned_list:
4546                                                 # When generating the pruned list, prefer instances
4547                                                 # of DependencyArg over instances of Package.
4548                                                 for parent_atom in parent_atoms:
4549                                                         if len(pruned_list) >= max_parents:
4550                                                                 break
4551                                                         parent, atom = parent_atom
4552                                                         if isinstance(parent, DependencyArg):
4553                                                                 pruned_list.add(parent_atom)
4554                                                 # Prefer Packages instances that themselves have been
4555                                                 # pulled into collision slots.
4556                                                 for parent_atom in parent_atoms:
4557                                                         if len(pruned_list) >= max_parents:
4558                                                                 break
4559                                                         parent, atom = parent_atom
4560                                                         if isinstance(parent, Package) and \
4561                                                                 (parent.slot_atom, parent.root) \
4562                                                                 in self._slot_collision_info:
4563                                                                 pruned_list.add(parent_atom)
4564                                                 for parent_atom in parent_atoms:
4565                                                         if len(pruned_list) >= max_parents:
4566                                                                 break
4567                                                         pruned_list.add(parent_atom)
4568                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4569                                         parent_atoms = pruned_list
4570                                         msg.append(" pulled in by\n")
4571                                         for parent_atom in parent_atoms:
4572                                                 parent, atom = parent_atom
4573                                                 msg.append(2*indent)
4574                                                 if isinstance(parent,
4575                                                         (PackageArg, AtomArg)):
4576                                                         # For PackageArg and AtomArg types, it's
4577                                                         # redundant to display the atom attribute.
4578                                                         msg.append(str(parent))
4579                                                 else:
4580                                                         # Display the specific atom from SetArg or
4581                                                         # Package types.
4582                                                         msg.append("%s required by %s" % (atom, parent))
4583                                                 msg.append("\n")
4584                                         if omitted_parents:
4585                                                 msg.append(2*indent)
4586                                                 msg.append("(and %d more)\n" % omitted_parents)
4587                                 else:
4588                                         msg.append(" (no parents)\n")
4589                                 msg.append("\n")
4590                         explanation = self._slot_conflict_explanation(slot_nodes)
4591                         if explanation:
4592                                 explanations += 1
4593                                 msg.append(indent + "Explanation:\n\n")
4594                                 for line in textwrap.wrap(explanation, explanation_columns):
4595                                         msg.append(2*indent + line + "\n")
4596                                 msg.append("\n")
4597                 msg.append("\n")
4598                 sys.stderr.write("".join(msg))
4599                 sys.stderr.flush()
4600
4601                 explanations_for_all = explanations == len(self._slot_collision_info)
4602
4603                 if explanations_for_all or "--quiet" in self.myopts:
4604                         return
4605
4606                 msg = []
4607                 msg.append("It may be possible to solve this problem ")
4608                 msg.append("by using package.mask to prevent one of ")
4609                 msg.append("those packages from being selected. ")
4610                 msg.append("However, it is also possible that conflicting ")
4611                 msg.append("dependencies exist such that they are impossible to ")
4612                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4613                 msg.append("the dependencies of two different packages, then those ")
4614                 msg.append("packages can not be installed simultaneously.")
4615
4616                 from formatter import AbstractFormatter, DumbWriter
4617                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4618                 for x in msg:
4619                         f.add_flowing_data(x)
4620                 f.end_paragraph(1)
4621
4622                 msg = []
4623                 msg.append("For more information, see MASKED PACKAGES ")
4624                 msg.append("section in the emerge man page or refer ")
4625                 msg.append("to the Gentoo Handbook.")
4626                 for x in msg:
4627                         f.add_flowing_data(x)
4628                 f.end_paragraph(1)
4629                 f.writer.flush()
4630
4631         def _slot_conflict_explanation(self, slot_nodes):
4632                 """
4633                 When a slot conflict occurs due to USE deps, there are a few
4634                 different cases to consider:
4635
4636                 1) New USE are correctly set but --newuse wasn't requested so an
4637                    installed package with incorrect USE happened to get pulled
4638                    into graph before the new one.
4639
4640                 2) New USE are incorrectly set but an installed package has correct
4641                    USE so it got pulled into the graph, and a new instance also got
4642                    pulled in due to --newuse or an upgrade.
4643
4644                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4645                    and multiple package instances got pulled into the same slot to
4646                    satisfy the conflicting deps.
4647
4648                 Currently, explanations and suggested courses of action are generated
4649                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4650                 """
4651
4652                 if len(slot_nodes) != 2:
4653                         # Suggestions are only implemented for
4654                         # conflicts between two packages.
4655                         return None
4656
4657                 all_conflict_atoms = self._slot_conflict_parent_atoms
4658                 matched_node = None
4659                 matched_atoms = None
4660                 unmatched_node = None
4661                 for node in slot_nodes:
4662                         parent_atoms = self._parent_atoms.get(node)
4663                         if not parent_atoms:
4664                                 # Normally, there are always parent atoms. If there are
4665                                 # none then something unexpected is happening and there's
4666                                 # currently no suggestion for this case.
4667                                 return None
4668                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4669                         for parent_atom in conflict_atoms:
4670                                 parent, atom = parent_atom
4671                                 if not atom.use:
4672                                         # Suggestions are currently only implemented for cases
4673                                         # in which all conflict atoms have USE deps.
4674                                         return None
4675                         if conflict_atoms:
4676                                 if matched_node is not None:
4677                                         # If conflict atoms match multiple nodes
4678                                         # then there's no suggestion.
4679                                         return None
4680                                 matched_node = node
4681                                 matched_atoms = conflict_atoms
4682                         else:
4683                                 if unmatched_node is not None:
4684                                         # Neither node is matched by conflict atoms, and
4685                                         # there is no suggestion for this case.
4686                                         return None
4687                                 unmatched_node = node
4688
4689                 if matched_node is None or unmatched_node is None:
4690                         # This shouldn't happen.
4691                         return None
4692
4693                 if unmatched_node.installed and not matched_node.installed:
4694                         return "New USE are correctly set, but --newuse wasn't" + \
4695                                 " requested, so an installed package with incorrect USE " + \
4696                                 "happened to get pulled into the dependency graph. " + \
4697                                 "In order to solve " + \
4698                                 "this, either specify the --newuse option or explicitly " + \
4699                                 " reinstall '%s'." % matched_node.slot_atom
4700
4701                 if matched_node.installed and not unmatched_node.installed:
4702                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4703                         explanation = ("New USE for '%s' are incorrectly set. " + \
4704                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4705                                 (matched_node.slot_atom, atoms[0])
4706                         if len(atoms) > 1:
4707                                 for atom in atoms[1:-1]:
4708                                         explanation += ", '%s'" % (atom,)
4709                                 if len(atoms) > 2:
4710                                         explanation += ","
4711                                 explanation += " and '%s'" % (atoms[-1],)
4712                         explanation += "."
4713                         return explanation
4714
4715                 return None
4716
4717         def _process_slot_conflicts(self):
4718                 """
4719                 Process slot conflict data to identify specific atoms which
4720                 lead to conflict. These atoms only match a subset of the
4721                 packages that have been pulled into a given slot.
4722                 """
4723                 for (slot_atom, root), slot_nodes \
4724                         in self._slot_collision_info.iteritems():
4725
4726                         all_parent_atoms = set()
4727                         for pkg in slot_nodes:
4728                                 parent_atoms = self._parent_atoms.get(pkg)
4729                                 if not parent_atoms:
4730                                         continue
4731                                 all_parent_atoms.update(parent_atoms)
4732
4733                         for pkg in slot_nodes:
4734                                 parent_atoms = self._parent_atoms.get(pkg)
4735                                 if parent_atoms is None:
4736                                         parent_atoms = set()
4737                                         self._parent_atoms[pkg] = parent_atoms
4738                                 for parent_atom in all_parent_atoms:
4739                                         if parent_atom in parent_atoms:
4740                                                 continue
4741                                         # Use package set for matching since it will match via
4742                                         # PROVIDE when necessary, while match_from_list does not.
4743                                         parent, atom = parent_atom
4744                                         atom_set = InternalPackageSet(
4745                                                 initial_atoms=(atom,))
4746                                         if atom_set.findAtomForPackage(pkg):
4747                                                 parent_atoms.add(parent_atom)
4748                                         else:
4749                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4750
4751         def _reinstall_for_flags(self, forced_flags,
4752                 orig_use, orig_iuse, cur_use, cur_iuse):
4753                 """Return a set of flags that trigger reinstallation, or None if there
4754                 are no such flags."""
4755                 if "--newuse" in self.myopts:
4756                         flags = set(orig_iuse.symmetric_difference(
4757                                 cur_iuse).difference(forced_flags))
4758                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4759                                 cur_iuse.intersection(cur_use)))
4760                         if flags:
4761                                 return flags
4762                 elif "changed-use" == self.myopts.get("--reinstall"):
4763                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4764                                 cur_iuse.intersection(cur_use))
4765                         if flags:
4766                                 return flags
4767                 return None
4768
4769         def _create_graph(self, allow_unsatisfied=False):
4770                 dep_stack = self._dep_stack
4771                 while dep_stack:
4772                         self.spinner.update()
4773                         dep = dep_stack.pop()
4774                         if isinstance(dep, Package):
4775                                 if not self._add_pkg_deps(dep,
4776                                         allow_unsatisfied=allow_unsatisfied):
4777                                         return 0
4778                                 continue
4779                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4780                                 return 0
4781                 return 1
4782
4783         def _add_dep(self, dep, allow_unsatisfied=False):
4784                 debug = "--debug" in self.myopts
4785                 buildpkgonly = "--buildpkgonly" in self.myopts
4786                 nodeps = "--nodeps" in self.myopts
4787                 empty = "empty" in self.myparams
4788                 deep = "deep" in self.myparams
4789                 update = "--update" in self.myopts and dep.depth <= 1
4790                 if dep.blocker:
4791                         if not buildpkgonly and \
4792                                 not nodeps and \
4793                                 dep.parent not in self._slot_collision_nodes:
4794                                 if dep.parent.onlydeps:
4795                                         # It's safe to ignore blockers if the
4796                                         # parent is an --onlydeps node.
4797                                         return 1
4798                                 # The blocker applies to the root where
4799                                 # the parent is or will be installed.
4800                                 blocker = Blocker(atom=dep.atom,
4801                                         eapi=dep.parent.metadata["EAPI"],
4802                                         root=dep.parent.root)
4803                                 self._blocker_parents.add(blocker, dep.parent)
4804                         return 1
4805                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4806                         onlydeps=dep.onlydeps)
4807                 if not dep_pkg:
4808                         if allow_unsatisfied:
4809                                 self._unsatisfied_deps.append(dep)
4810                                 return 1
4811                         self._unsatisfied_deps_for_display.append(
4812                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4813                         return 0
4814                 # In some cases, dep_check will return deps that shouldn't
4815                 # be proccessed any further, so they are identified and
4816                 # discarded here. Try to discard as few as possible since
4817                 # discarded dependencies reduce the amount of information
4818                 # available for optimization of merge order.
4819                 if dep.priority.satisfied and \
4820                         not (existing_node or empty or deep or update):
4821                         myarg = None
4822                         if dep.root == self.target_root:
4823                                 try:
4824                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4825                                 except StopIteration:
4826                                         pass
4827                                 except portage.exception.InvalidDependString:
4828                                         if not dep_pkg.installed:
4829                                                 # This shouldn't happen since the package
4830                                                 # should have been masked.
4831                                                 raise
4832                         if not myarg:
4833                                 self._ignored_deps.append(dep)
4834                                 return 1
4835
4836                 if not self._add_pkg(dep_pkg, dep):
4837                         return 0
4838                 return 1
4839
4840         def _add_pkg(self, pkg, dep):
4841                 myparent = None
4842                 priority = None
4843                 depth = 0
4844                 if dep is None:
4845                         dep = Dependency()
4846                 else:
4847                         myparent = dep.parent
4848                         priority = dep.priority
4849                         depth = dep.depth
4850                 if priority is None:
4851                         priority = DepPriority()
4852                 """
4853                 Fills the digraph with nodes comprised of packages to merge.
4854                 mybigkey is the package spec of the package to merge.
4855                 myparent is the package depending on mybigkey ( or None )
4856                 addme = Should we add this package to the digraph or are we just looking at it's deps?
4857                         Think --onlydeps, we need to ignore packages in that case.
4858                 #stuff to add:
4859                 #SLOT-aware emerge
4860                 #IUSE-aware emerge -> USE DEP aware depgraph
4861                 #"no downgrade" emerge
4862                 """
4863                 # Ensure that the dependencies of the same package
4864                 # are never processed more than once.
4865                 previously_added = pkg in self.digraph
4866
4867                 # select the correct /var database that we'll be checking against
4868                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4869                 pkgsettings = self.pkgsettings[pkg.root]
4870
4871                 arg_atoms = None
4872                 if True:
4873                         try:
4874                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4875                         except portage.exception.InvalidDependString, e:
4876                                 if not pkg.installed:
4877                                         show_invalid_depstring_notice(
4878                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4879                                         return 0
4880                                 del e
4881
4882                 if not pkg.onlydeps:
4883                         if not pkg.installed and \
4884                                 "empty" not in self.myparams and \
4885                                 vardbapi.match(pkg.slot_atom):
4886                                 # Increase the priority of dependencies on packages that
4887                                 # are being rebuilt. This optimizes merge order so that
4888                                 # dependencies are rebuilt/updated as soon as possible,
4889                                 # which is needed especially when emerge is called by
4890                                 # revdep-rebuild since dependencies may be affected by ABI
4891                                 # breakage that has rendered them useless. Don't adjust
4892                                 # priority here when in "empty" mode since all packages
4893                                 # are being merged in that case.
4894                                 priority.rebuild = True
4895
4896                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4897                         slot_collision = False
4898                         if existing_node:
4899                                 existing_node_matches = pkg.cpv == existing_node.cpv
4900                                 if existing_node_matches and \
4901                                         pkg != existing_node and \
4902                                         dep.atom is not None:
4903                                         # Use package set for matching since it will match via
4904                                         # PROVIDE when necessary, while match_from_list does not.
4905                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4906                                         if not atom_set.findAtomForPackage(existing_node):
4907                                                 existing_node_matches = False
4908                                 if existing_node_matches:
4909                                         # The existing node can be reused.
4910                                         if arg_atoms:
4911                                                 for parent_atom in arg_atoms:
4912                                                         parent, atom = parent_atom
4913                                                         self.digraph.add(existing_node, parent,
4914                                                                 priority=priority)
4915                                                         self._add_parent_atom(existing_node, parent_atom)
4916                                         # If a direct circular dependency is not an unsatisfied
4917                                         # buildtime dependency then drop it here since otherwise
4918                                         # it can skew the merge order calculation in an unwanted
4919                                         # way.
4920                                         if existing_node != myparent or \
4921                                                 (priority.buildtime and not priority.satisfied):
4922                                                 self.digraph.addnode(existing_node, myparent,
4923                                                         priority=priority)
4924                                                 if dep.atom is not None and dep.parent is not None:
4925                                                         self._add_parent_atom(existing_node,
4926                                                                 (dep.parent, dep.atom))
4927                                         return 1
4928                                 else:
4929
4930                                         # A slot collision has occurred.  Sometimes this coincides
4931                                         # with unresolvable blockers, so the slot collision will be
4932                                         # shown later if there are no unresolvable blockers.
4933                                         self._add_slot_conflict(pkg)
4934                                         slot_collision = True
4935
4936                         if slot_collision:
4937                                 # Now add this node to the graph so that self.display()
4938                                 # can show use flags and --tree portage.output.  This node is
4939                                 # only being partially added to the graph.  It must not be
4940                                 # allowed to interfere with the other nodes that have been
4941                                 # added.  Do not overwrite data for existing nodes in
4942                                 # self.mydbapi since that data will be used for blocker
4943                                 # validation.
4944                                 # Even though the graph is now invalid, continue to process
4945                                 # dependencies so that things like --fetchonly can still
4946                                 # function despite collisions.
4947                                 pass
4948                         else:
4949                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4950                                 self.mydbapi[pkg.root].cpv_inject(pkg)
4951
4952                         if not pkg.installed:
4953                                 # Allow this package to satisfy old-style virtuals in case it
4954                                 # doesn't already. Any pre-existing providers will be preferred
4955                                 # over this one.
4956                                 try:
4957                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
4958                                         # For consistency, also update the global virtuals.
4959                                         settings = self.roots[pkg.root].settings
4960                                         settings.unlock()
4961                                         settings.setinst(pkg.cpv, pkg.metadata)
4962                                         settings.lock()
4963                                 except portage.exception.InvalidDependString, e:
4964                                         show_invalid_depstring_notice(
4965                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4966                                         del e
4967                                         return 0
4968
4969                 if arg_atoms:
4970                         self._set_nodes.add(pkg)
4971
4972                 # Do this even when addme is False (--onlydeps) so that the
4973                 # parent/child relationship is always known in case
4974                 # self._show_slot_collision_notice() needs to be called later.
4975                 self.digraph.add(pkg, myparent, priority=priority)
4976                 if dep.atom is not None and dep.parent is not None:
4977                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
4978
4979                 if arg_atoms:
4980                         for parent_atom in arg_atoms:
4981                                 parent, atom = parent_atom
4982                                 self.digraph.add(pkg, parent, priority=priority)
4983                                 self._add_parent_atom(pkg, parent_atom)
4984
4985                 """ This section determines whether we go deeper into dependencies or not.
4986                     We want to go deeper on a few occasions:
4987                     Installing package A, we need to make sure package A's deps are met.
4988                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4989                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4990                 """
4991                 dep_stack = self._dep_stack
4992                 if "recurse" not in self.myparams:
4993                         return 1
4994                 elif pkg.installed and \
4995                         "deep" not in self.myparams:
4996                         dep_stack = self._ignored_deps
4997
4998                 self.spinner.update()
4999
5000                 if arg_atoms:
5001                         depth = 0
5002                 pkg.depth = depth
5003                 if not previously_added:
5004                         dep_stack.append(pkg)
5005                 return 1
5006
5007         def _add_parent_atom(self, pkg, parent_atom):
5008                 parent_atoms = self._parent_atoms.get(pkg)
5009                 if parent_atoms is None:
5010                         parent_atoms = set()
5011                         self._parent_atoms[pkg] = parent_atoms
5012                 parent_atoms.add(parent_atom)
5013
5014         def _add_slot_conflict(self, pkg):
5015                 self._slot_collision_nodes.add(pkg)
5016                 slot_key = (pkg.slot_atom, pkg.root)
5017                 slot_nodes = self._slot_collision_info.get(slot_key)
5018                 if slot_nodes is None:
5019                         slot_nodes = set()
5020                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5021                         self._slot_collision_info[slot_key] = slot_nodes
5022                 slot_nodes.add(pkg)
5023
5024         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5025
5026                 mytype = pkg.type_name
5027                 myroot = pkg.root
5028                 mykey = pkg.cpv
5029                 metadata = pkg.metadata
5030                 myuse = pkg.use.enabled
5031                 jbigkey = pkg
5032                 depth = pkg.depth + 1
5033                 removal_action = "remove" in self.myparams
5034
5035                 edepend={}
5036                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5037                 for k in depkeys:
5038                         edepend[k] = metadata[k]
5039
5040                 if not pkg.built and \
5041                         "--buildpkgonly" in self.myopts and \
5042                         "deep" not in self.myparams and \
5043                         "empty" not in self.myparams:
5044                         edepend["RDEPEND"] = ""
5045                         edepend["PDEPEND"] = ""
5046                 bdeps_satisfied = False
5047                 
5048                 if pkg.built and not removal_action:
5049                         if self.myopts.get("--with-bdeps", "n") == "y":
5050                                 # Pull in build time deps as requested, but marked them as
5051                                 # "satisfied" since they are not strictly required. This allows
5052                                 # more freedom in the merge order calculation for solving
5053                                 # circular dependencies. Don't convert to PDEPEND since that
5054                                 # could make --with-bdeps=y less effective if it is used to
5055                                 # adjust merge order to prevent built_with_use() calls from
5056                                 # failing.
5057                                 bdeps_satisfied = True
5058                         else:
5059                                 # built packages do not have build time dependencies.
5060                                 edepend["DEPEND"] = ""
5061
5062                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5063                         edepend["DEPEND"] = ""
5064
5065                 deps = (
5066                         ("/", edepend["DEPEND"],
5067                                 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
5068                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5069                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5070                 )
5071
5072                 debug = "--debug" in self.myopts
5073                 strict = mytype != "installed"
5074                 try:
5075                         for dep_root, dep_string, dep_priority in deps:
5076                                 if pkg.onlydeps:
5077                                         # Decrease priority so that --buildpkgonly
5078                                         # hasallzeros() works correctly.
5079                                         dep_priority = DepPriority()
5080                                 if not dep_string:
5081                                         continue
5082                                 if debug:
5083                                         print
5084                                         print "Parent:   ", jbigkey
5085                                         print "Depstring:", dep_string
5086                                         print "Priority:", dep_priority
5087                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5088                                 try:
5089                                         selected_atoms = self._select_atoms(dep_root,
5090                                                 dep_string, myuse=myuse, parent=pkg, strict=strict)
5091                                 except portage.exception.InvalidDependString, e:
5092                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5093                                         return 0
5094                                 if debug:
5095                                         print "Candidates:", selected_atoms
5096
5097                                 for atom in selected_atoms:
5098                                         try:
5099
5100                                                 atom = portage.dep.Atom(atom)
5101
5102                                                 mypriority = dep_priority.copy()
5103                                                 if not atom.blocker and vardb.match(atom):
5104                                                         mypriority.satisfied = True
5105
5106                                                 if not self._add_dep(Dependency(atom=atom,
5107                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5108                                                         priority=mypriority, root=dep_root),
5109                                                         allow_unsatisfied=allow_unsatisfied):
5110                                                         return 0
5111
5112                                         except portage.exception.InvalidAtom, e:
5113                                                 show_invalid_depstring_notice(
5114                                                         pkg, dep_string, str(e))
5115                                                 del e
5116                                                 if not pkg.installed:
5117                                                         return 0
5118
5119                                 if debug:
5120                                         print "Exiting...", jbigkey
5121                 except portage.exception.AmbiguousPackageName, e:
5122                         pkgs = e.args[0]
5123                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5124                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5125                         for cpv in pkgs:
5126                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5127                         portage.writemsg("\n", noiselevel=-1)
5128                         if mytype == "binary":
5129                                 portage.writemsg(
5130                                         "!!! This binary package cannot be installed: '%s'\n" % \
5131                                         mykey, noiselevel=-1)
5132                         elif mytype == "ebuild":
5133                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5134                                 myebuild, mylocation = portdb.findname2(mykey)
5135                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5136                                         "'%s'\n" % myebuild, noiselevel=-1)
5137                         portage.writemsg("!!! Please notify the package maintainer " + \
5138                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5139                         return 0
5140                 return 1
5141
5142         def _priority(self, **kwargs):
5143                 if "remove" in self.myparams:
5144                         priority_constructor = UnmergeDepPriority
5145                 else:
5146                         priority_constructor = DepPriority
5147                 return priority_constructor(**kwargs)
5148
5149         def _dep_expand(self, root_config, atom_without_category):
5150                 """
5151                 @param root_config: a root config instance
5152                 @type root_config: RootConfig
5153                 @param atom_without_category: an atom without a category component
5154                 @type atom_without_category: String
5155                 @rtype: list
5156                 @returns: a list of atoms containing categories (possibly empty)
5157                 """
5158                 null_cp = portage.dep_getkey(insert_category_into_atom(
5159                         atom_without_category, "null"))
5160                 cat, atom_pn = portage.catsplit(null_cp)
5161
5162                 cp_set = set()
5163                 for db, pkg_type, built, installed, db_keys in \
5164                         self._filtered_trees[root_config.root]["dbs"]:
5165                         cp_set.update(db.cp_all())
5166                 for cp in list(cp_set):
5167                         cat, pn = portage.catsplit(cp)
5168                         if pn != atom_pn:
5169                                 cp_set.discard(cp)
5170                 deps = []
5171                 for cp in cp_set:
5172                         cat, pn = portage.catsplit(cp)
5173                         deps.append(insert_category_into_atom(
5174                                 atom_without_category, cat))
5175                 return deps
5176
5177         def _have_new_virt(self, root, atom_cp):
5178                 ret = False
5179                 for db, pkg_type, built, installed, db_keys in \
5180                         self._filtered_trees[root]["dbs"]:
5181                         if db.cp_list(atom_cp):
5182                                 ret = True
5183                                 break
5184                 return ret
5185
5186         def _iter_atoms_for_pkg(self, pkg):
5187                 # TODO: add multiple $ROOT support
5188                 if pkg.root != self.target_root:
5189                         return
5190                 atom_arg_map = self._atom_arg_map
5191                 root_config = self.roots[pkg.root]
5192                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5193                         atom_cp = portage.dep_getkey(atom)
5194                         if atom_cp != pkg.cp and \
5195                                 self._have_new_virt(pkg.root, atom_cp):
5196                                 continue
5197                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5198                         visible_pkgs.reverse() # descending order
5199                         higher_slot = None
5200                         for visible_pkg in visible_pkgs:
5201                                 if visible_pkg.cp != atom_cp:
5202                                         continue
5203                                 if pkg >= visible_pkg:
5204                                         # This is descending order, and we're not
5205                                         # interested in any versions <= pkg given.
5206                                         break
5207                                 if pkg.slot_atom != visible_pkg.slot_atom:
5208                                         higher_slot = visible_pkg
5209                                         break
5210                         if higher_slot is not None:
5211                                 continue
5212                         for arg in atom_arg_map[(atom, pkg.root)]:
5213                                 if isinstance(arg, PackageArg) and \
5214                                         arg.package != pkg:
5215                                         continue
5216                                 yield arg, atom
5217
5218         def select_files(self, myfiles):
5219                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5220                 appropriate depgraph and return a favorite list."""
5221                 debug = "--debug" in self.myopts
5222                 root_config = self.roots[self.target_root]
5223                 sets = root_config.sets
5224                 getSetAtoms = root_config.setconfig.getSetAtoms
5225                 myfavorites=[]
5226                 myroot = self.target_root
5227                 dbs = self._filtered_trees[myroot]["dbs"]
5228                 vardb = self.trees[myroot]["vartree"].dbapi
5229                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5230                 portdb = self.trees[myroot]["porttree"].dbapi
5231                 bindb = self.trees[myroot]["bintree"].dbapi
5232                 pkgsettings = self.pkgsettings[myroot]
5233                 args = []
5234                 onlydeps = "--onlydeps" in self.myopts
5235                 lookup_owners = []
5236                 for x in myfiles:
5237                         ext = os.path.splitext(x)[1]
5238                         if ext==".tbz2":
5239                                 if not os.path.exists(x):
5240                                         if os.path.exists(
5241                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5242                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5243                                         elif os.path.exists(
5244                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5245                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5246                                         else:
5247                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5248                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5249                                                 return 0, myfavorites
5250                                 mytbz2=portage.xpak.tbz2(x)
5251                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5252                                 if os.path.realpath(x) != \
5253                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5254                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5255                                         return 0, myfavorites
5256                                 db_keys = list(bindb._aux_cache_keys)
5257                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5258                                 pkg = Package(type_name="binary", root_config=root_config,
5259                                         cpv=mykey, built=True, metadata=metadata,
5260                                         onlydeps=onlydeps)
5261                                 self._pkg_cache[pkg] = pkg
5262                                 args.append(PackageArg(arg=x, package=pkg,
5263                                         root_config=root_config))
5264                         elif ext==".ebuild":
5265                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5266                                 pkgdir = os.path.dirname(ebuild_path)
5267                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5268                                 cp = pkgdir[len(tree_root)+1:]
5269                                 e = portage.exception.PackageNotFound(
5270                                         ("%s is not in a valid portage tree " + \
5271                                         "hierarchy or does not exist") % x)
5272                                 if not portage.isvalidatom(cp):
5273                                         raise e
5274                                 cat = portage.catsplit(cp)[0]
5275                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5276                                 if not portage.isvalidatom("="+mykey):
5277                                         raise e
5278                                 ebuild_path = portdb.findname(mykey)
5279                                 if ebuild_path:
5280                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5281                                                 cp, os.path.basename(ebuild_path)):
5282                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5283                                                 return 0, myfavorites
5284                                         if mykey not in portdb.xmatch(
5285                                                 "match-visible", portage.dep_getkey(mykey)):
5286                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5287                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5288                                                 print colorize("BAD", "*** page for details.")
5289                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5290                                                         "Continuing...")
5291                                 else:
5292                                         raise portage.exception.PackageNotFound(
5293                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5294                                 db_keys = list(portdb._aux_cache_keys)
5295                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5296                                 pkg = Package(type_name="ebuild", root_config=root_config,
5297                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5298                                 pkgsettings.setcpv(pkg)
5299                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5300                                 self._pkg_cache[pkg] = pkg
5301                                 args.append(PackageArg(arg=x, package=pkg,
5302                                         root_config=root_config))
5303                         elif x.startswith(os.path.sep):
5304                                 if not x.startswith(myroot):
5305                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5306                                                 " $ROOT.\n") % x, noiselevel=-1)
5307                                         return 0, []
5308                                 # Queue these up since it's most efficient to handle
5309                                 # multiple files in a single iter_owners() call.
5310                                 lookup_owners.append(x)
5311                         else:
5312                                 if x in ("system", "world"):
5313                                         x = SETPREFIX + x
5314                                 if x.startswith(SETPREFIX):
5315                                         s = x[len(SETPREFIX):]
5316                                         if s not in sets:
5317                                                 raise portage.exception.PackageSetNotFound(s)
5318                                         if s in self._sets:
5319                                                 continue
5320                                         # Recursively expand sets so that containment tests in
5321                                         # self._get_parent_sets() properly match atoms in nested
5322                                         # sets (like if world contains system).
5323                                         expanded_set = InternalPackageSet(
5324                                                 initial_atoms=getSetAtoms(s))
5325                                         self._sets[s] = expanded_set
5326                                         args.append(SetArg(arg=x, set=expanded_set,
5327                                                 root_config=root_config))
5328                                         myfavorites.append(x)
5329                                         continue
5330                                 if not is_valid_package_atom(x):
5331                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5332                                                 noiselevel=-1)
5333                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5334                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5335                                         return (0,[])
5336                                 # Don't expand categories or old-style virtuals here unless
5337                                 # necessary. Expansion of old-style virtuals here causes at
5338                                 # least the following problems:
5339                                 #   1) It's more difficult to determine which set(s) an atom
5340                                 #      came from, if any.
5341                                 #   2) It takes away freedom from the resolver to choose other
5342                                 #      possible expansions when necessary.
5343                                 if "/" in x:
5344                                         args.append(AtomArg(arg=x, atom=x,
5345                                                 root_config=root_config))
5346                                         continue
5347                                 expanded_atoms = self._dep_expand(root_config, x)
5348                                 installed_cp_set = set()
5349                                 for atom in expanded_atoms:
5350                                         atom_cp = portage.dep_getkey(atom)
5351                                         if vardb.cp_list(atom_cp):
5352                                                 installed_cp_set.add(atom_cp)
5353                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5354                                         installed_cp = iter(installed_cp_set).next()
5355                                         expanded_atoms = [atom for atom in expanded_atoms \
5356                                                 if portage.dep_getkey(atom) == installed_cp]
5357
5358                                 if len(expanded_atoms) > 1:
5359                                         print
5360                                         print
5361                                         ambiguous_package_name(x, expanded_atoms, root_config,
5362                                                 self.spinner, self.myopts)
5363                                         return False, myfavorites
5364                                 if expanded_atoms:
5365                                         atom = expanded_atoms[0]
5366                                 else:
5367                                         null_atom = insert_category_into_atom(x, "null")
5368                                         null_cp = portage.dep_getkey(null_atom)
5369                                         cat, atom_pn = portage.catsplit(null_cp)
5370                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5371                                         if virts_p:
5372                                                 # Allow the depgraph to choose which virtual.
5373                                                 atom = insert_category_into_atom(x, "virtual")
5374                                         else:
5375                                                 atom = insert_category_into_atom(x, "null")
5376
5377                                 args.append(AtomArg(arg=x, atom=atom,
5378                                         root_config=root_config))
5379
5380                 if lookup_owners:
5381                         relative_paths = []
5382                         search_for_multiple = False
5383                         if len(lookup_owners) > 1:
5384                                 search_for_multiple = True
5385
5386                         for x in lookup_owners:
5387                                 if not search_for_multiple and os.path.isdir(x):
5388                                         search_for_multiple = True
5389                                 relative_paths.append(x[len(myroot):])
5390
5391                         owners = set()
5392                         for pkg, relative_path in \
5393                                 real_vardb._owners.iter_owners(relative_paths):
5394                                 owners.add(pkg.mycpv)
5395                                 if not search_for_multiple:
5396                                         break
5397
5398                         if not owners:
5399                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5400                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5401                                 return 0, []
5402
5403                         for cpv in owners:
5404                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5405                                 if not slot:
5406                                         # portage now masks packages with missing slot, but it's
5407                                         # possible that one was installed by an older version
5408                                         atom = portage.cpv_getkey(cpv)
5409                                 else:
5410                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5411                                 args.append(AtomArg(arg=atom, atom=atom,
5412                                         root_config=root_config))
5413
5414                 if "--update" in self.myopts:
5415                         # Enable greedy SLOT atoms for atoms given as arguments.
5416                         # This is currently disabled for sets since greedy SLOT
5417                         # atoms could be a property of the set itself.
5418                         greedy_atoms = []
5419                         for arg in args:
5420                                 # In addition to any installed slots, also try to pull
5421                                 # in the latest new slot that may be available.
5422                                 greedy_atoms.append(arg)
5423                                 if not isinstance(arg, (AtomArg, PackageArg)):
5424                                         continue
5425                                 atom_cp = portage.dep_getkey(arg.atom)
5426                                 slots = set()
5427                                 for cpv in vardb.match(arg.atom):
5428                                         slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5429                                 for slot in slots:
5430                                         greedy_atoms.append(
5431                                                 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5432                                                         root_config=root_config))
5433                         args = greedy_atoms
5434                         del greedy_atoms
5435
5436                 # Create the "args" package set from atoms and
5437                 # packages given as arguments.
5438                 args_set = self._sets["args"]
5439                 for arg in args:
5440                         if not isinstance(arg, (AtomArg, PackageArg)):
5441                                 continue
5442                         myatom = arg.atom
5443                         if myatom in args_set:
5444                                 continue
5445                         args_set.add(myatom)
5446                         myfavorites.append(myatom)
5447                 self._set_atoms.update(chain(*self._sets.itervalues()))
5448                 atom_arg_map = self._atom_arg_map
5449                 for arg in args:
5450                         for atom in arg.set:
5451                                 atom_key = (atom, myroot)
5452                                 refs = atom_arg_map.get(atom_key)
5453                                 if refs is None:
5454                                         refs = []
5455                                         atom_arg_map[atom_key] = refs
5456                                         if arg not in refs:
5457                                                 refs.append(arg)
5458                 pprovideddict = pkgsettings.pprovideddict
5459                 if debug:
5460                         portage.writemsg("\n", noiselevel=-1)
5461                 # Order needs to be preserved since a feature of --nodeps
5462                 # is to allow the user to force a specific merge order.
5463                 args.reverse()
5464                 while args:
5465                         arg = args.pop()
5466                         for atom in arg.set:
5467                                 self.spinner.update()
5468                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5469                                         root=myroot, parent=arg)
5470                                 atom_cp = portage.dep_getkey(atom)
5471                                 try:
5472                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5473                                         if pprovided and portage.match_from_list(atom, pprovided):
5474                                                 # A provided package has been specified on the command line.
5475                                                 self._pprovided_args.append((arg, atom))
5476                                                 continue
5477                                         if isinstance(arg, PackageArg):
5478                                                 if not self._add_pkg(arg.package, dep) or \
5479                                                         not self._create_graph():
5480                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5481                                                                 "dependencies for %s\n") % arg.arg)
5482                                                         return 0, myfavorites
5483                                                 continue
5484                                         if debug:
5485                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5486                                                         (arg, atom), noiselevel=-1)
5487                                         pkg, existing_node = self._select_package(
5488                                                 myroot, atom, onlydeps=onlydeps)
5489                                         if not pkg:
5490                                                 if not (isinstance(arg, SetArg) and \
5491                                                         arg.name in ("system", "world")):
5492                                                         self._unsatisfied_deps_for_display.append(
5493                                                                 ((myroot, atom), {}))
5494                                                         return 0, myfavorites
5495                                                 self._missing_args.append((arg, atom))
5496                                                 continue
5497                                         if atom_cp != pkg.cp:
5498                                                 # For old-style virtuals, we need to repeat the
5499                                                 # package.provided check against the selected package.
5500                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5501                                                 pprovided = pprovideddict.get(pkg.cp)
5502                                                 if pprovided and \
5503                                                         portage.match_from_list(expanded_atom, pprovided):
5504                                                         # A provided package has been
5505                                                         # specified on the command line.
5506                                                         self._pprovided_args.append((arg, atom))
5507                                                         continue
5508                                         if pkg.installed and "selective" not in self.myparams:
5509                                                 self._unsatisfied_deps_for_display.append(
5510                                                         ((myroot, atom), {}))
5511                                                 # Previous behavior was to bail out in this case, but
5512                                                 # since the dep is satisfied by the installed package,
5513                                                 # it's more friendly to continue building the graph
5514                                                 # and just show a warning message. Therefore, only bail
5515                                                 # out here if the atom is not from either the system or
5516                                                 # world set.
5517                                                 if not (isinstance(arg, SetArg) and \
5518                                                         arg.name in ("system", "world")):
5519                                                         return 0, myfavorites
5520
5521                                         # Add the selected package to the graph as soon as possible
5522                                         # so that later dep_check() calls can use it as feedback
5523                                         # for making more consistent atom selections.
5524                                         if not self._add_pkg(pkg, dep):
5525                                                 if isinstance(arg, SetArg):
5526                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5527                                                                 "dependencies for %s from %s\n") % \
5528                                                                 (atom, arg.arg))
5529                                                 else:
5530                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5531                                                                 "dependencies for %s\n") % atom)
5532                                                 return 0, myfavorites
5533
5534                                 except portage.exception.MissingSignature, e:
5535                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5536                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5537                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5538                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5539                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5540                                         return 0, myfavorites
5541                                 except portage.exception.InvalidSignature, e:
5542                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5543                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5544                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5545                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5546                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5547                                         return 0, myfavorites
5548                                 except SystemExit, e:
5549                                         raise # Needed else can't exit
5550                                 except Exception, e:
5551                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5552                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5553                                         raise
5554
5555                 # Now that the root packages have been added to the graph,
5556                 # process the dependencies.
5557                 if not self._create_graph():
5558                         return 0, myfavorites
5559
5560                 missing=0
5561                 if "--usepkgonly" in self.myopts:
5562                         for xs in self.digraph.all_nodes():
5563                                 if not isinstance(xs, Package):
5564                                         continue
5565                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5566                                         if missing == 0:
5567                                                 print
5568                                         missing += 1
5569                                         print "Missing binary for:",xs[2]
5570
5571                 try:
5572                         self.altlist()
5573                 except self._unknown_internal_error:
5574                         return False, myfavorites
5575
5576                 # We're true here unless we are missing binaries.
5577                 return (not missing,myfavorites)
5578
5579         def _select_atoms_from_graph(self, *pargs, **kwargs):
5580                 """
5581                 Prefer atoms matching packages that have already been
5582                 added to the graph or those that are installed and have
5583                 not been scheduled for replacement.
5584                 """
5585                 kwargs["trees"] = self._graph_trees
5586                 return self._select_atoms_highest_available(*pargs, **kwargs)
5587
5588         def _select_atoms_highest_available(self, root, depstring,
5589                 myuse=None, parent=None, strict=True, trees=None):
5590                 """This will raise InvalidDependString if necessary. If trees is
5591                 None then self._filtered_trees is used."""
5592                 pkgsettings = self.pkgsettings[root]
5593                 if trees is None:
5594                         trees = self._filtered_trees
5595                 if True:
5596                         try:
5597                                 if parent is not None:
5598                                         trees[root]["parent"] = parent
5599                                 if not strict:
5600                                         portage.dep._dep_check_strict = False
5601                                 mycheck = portage.dep_check(depstring, None,
5602                                         pkgsettings, myuse=myuse,
5603                                         myroot=root, trees=trees)
5604                         finally:
5605                                 if parent is not None:
5606                                         trees[root].pop("parent")
5607                                 portage.dep._dep_check_strict = True
5608                         if not mycheck[0]:
5609                                 raise portage.exception.InvalidDependString(mycheck[1])
5610                         selected_atoms = mycheck[1]
5611                 return selected_atoms
5612
5613         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5614                 atom = portage.dep.Atom(atom)
5615                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5616                 atom_without_use = atom
5617                 if atom.use:
5618                         atom_without_use = portage.dep.remove_slot(atom)
5619                         if atom.slot:
5620                                 atom_without_use += ":" + atom.slot
5621                         atom_without_use = portage.dep.Atom(atom_without_use)
5622                 xinfo = '"%s"' % atom
5623                 if arg:
5624                         xinfo='"%s"' % arg
5625                 # Discard null/ from failed cpv_expand category expansion.
5626                 xinfo = xinfo.replace("null/", "")
5627                 masked_packages = []
5628                 missing_use = []
5629                 missing_licenses = []
5630                 have_eapi_mask = False
5631                 pkgsettings = self.pkgsettings[root]
5632                 implicit_iuse = pkgsettings._get_implicit_iuse()
5633                 root_config = self.roots[root]
5634                 portdb = self.roots[root].trees["porttree"].dbapi
5635                 dbs = self._filtered_trees[root]["dbs"]
5636                 for db, pkg_type, built, installed, db_keys in dbs:
5637                         if installed:
5638                                 continue
5639                         match = db.match
5640                         if hasattr(db, "xmatch"):
5641                                 cpv_list = db.xmatch("match-all", atom_without_use)
5642                         else:
5643                                 cpv_list = db.match(atom_without_use)
5644                         # descending order
5645                         cpv_list.reverse()
5646                         for cpv in cpv_list:
5647                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5648                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5649                                 if metadata is not None:
5650                                         pkg = Package(built=built, cpv=cpv,
5651                                                 installed=installed, metadata=metadata,
5652                                                 root_config=root_config)
5653                                         if pkg.cp != atom.cp:
5654                                                 # A cpv can be returned from dbapi.match() as an
5655                                                 # old-style virtual match even in cases when the
5656                                                 # package does not actually PROVIDE the virtual.
5657                                                 # Filter out any such false matches here.
5658                                                 if not atom_set.findAtomForPackage(pkg):
5659                                                         continue
5660                                         if atom.use and not mreasons:
5661                                                 missing_use.append(pkg)
5662                                                 continue
5663                                 masked_packages.append(
5664                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5665
5666                 missing_use_reasons = []
5667                 missing_iuse_reasons = []
5668                 for pkg in missing_use:
5669                         use = pkg.use.enabled
5670                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5671                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5672                         missing_iuse = []
5673                         for x in atom.use.required:
5674                                 if iuse_re.match(x) is None:
5675                                         missing_iuse.append(x)
5676                         mreasons = []
5677                         if missing_iuse:
5678                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5679                                 missing_iuse_reasons.append((pkg, mreasons))
5680                         else:
5681                                 need_enable = sorted(atom.use.enabled.difference(use))
5682                                 need_disable = sorted(atom.use.disabled.intersection(use))
5683                                 if need_enable or need_disable:
5684                                         changes = []
5685                                         changes.extend(colorize("red", "+" + x) \
5686                                                 for x in need_enable)
5687                                         changes.extend(colorize("blue", "-" + x) \
5688                                                 for x in need_disable)
5689                                         mreasons.append("Change USE: %s" % " ".join(changes))
5690                                         missing_use_reasons.append((pkg, mreasons))
5691
5692                 if missing_iuse_reasons and not missing_use_reasons:
5693                         missing_use_reasons = missing_iuse_reasons
5694                 elif missing_use_reasons:
5695                         # Only show the latest version.
5696                         del missing_use_reasons[1:]
5697
5698                 if missing_use_reasons:
5699                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5700                         print "!!! One of the following packages is required to complete your request:"
5701                         for pkg, mreasons in missing_use_reasons:
5702                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5703
5704                 elif masked_packages:
5705                         print "\n!!! " + \
5706                                 colorize("BAD", "All ebuilds that could satisfy ") + \
5707                                 colorize("INFORM", xinfo) + \
5708                                 colorize("BAD", " have been masked.")
5709                         print "!!! One of the following masked packages is required to complete your request:"
5710                         have_eapi_mask = show_masked_packages(masked_packages)
5711                         if have_eapi_mask:
5712                                 print
5713                                 msg = ("The current version of portage supports " + \
5714                                         "EAPI '%s'. You must upgrade to a newer version" + \
5715                                         " of portage before EAPI masked packages can" + \
5716                                         " be installed.") % portage.const.EAPI
5717                                 from textwrap import wrap
5718                                 for line in wrap(msg, 75):
5719                                         print line
5720                         print
5721                         show_mask_docs()
5722                 else:
5723                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5724
5725                 # Show parent nodes and the argument that pulled them in.
5726                 traversed_nodes = set()
5727                 node = myparent
5728                 msg = []
5729                 while node is not None:
5730                         traversed_nodes.add(node)
5731                         msg.append('(dependency required by "%s" [%s])' % \
5732                                 (colorize('INFORM', str(node.cpv)), node.type_name))
5733                         # When traversing to parents, prefer arguments over packages
5734                         # since arguments are root nodes. Never traverse the same
5735                         # package twice, in order to prevent an infinite loop.
5736                         selected_parent = None
5737                         for parent in self.digraph.parent_nodes(node):
5738                                 if isinstance(parent, DependencyArg):
5739                                         msg.append('(dependency required by "%s" [argument])' % \
5740                                                 (colorize('INFORM', str(parent))))
5741                                         selected_parent = None
5742                                         break
5743                                 if parent not in traversed_nodes:
5744                                         selected_parent = parent
5745                         node = selected_parent
5746                 for line in msg:
5747                         print line
5748
5749                 print
5750
5751         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5752                 cache_key = (root, atom, onlydeps)
5753                 ret = self._highest_pkg_cache.get(cache_key)
5754                 if ret is not None:
5755                         pkg, existing = ret
5756                         if pkg and not existing:
5757                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5758                                 if existing and existing == pkg:
5759                                         # Update the cache to reflect that the
5760                                         # package has been added to the graph.
5761                                         ret = pkg, pkg
5762                                         self._highest_pkg_cache[cache_key] = ret
5763                         return ret
5764                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5765                 self._highest_pkg_cache[cache_key] = ret
5766                 pkg, existing = ret
5767                 if pkg is not None:
5768                         settings = pkg.root_config.settings
5769                         if visible(settings, pkg) and not (pkg.installed and \
5770                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
5771                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
5772                 return ret
5773
5774         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5775                 root_config = self.roots[root]
5776                 pkgsettings = self.pkgsettings[root]
5777                 dbs = self._filtered_trees[root]["dbs"]
5778                 vardb = self.roots[root].trees["vartree"].dbapi
5779                 portdb = self.roots[root].trees["porttree"].dbapi
5780                 # List of acceptable packages, ordered by type preference.
5781                 matched_packages = []
5782                 highest_version = None
5783                 if not isinstance(atom, portage.dep.Atom):
5784                         atom = portage.dep.Atom(atom)
5785                 atom_cp = atom.cp
5786                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5787                 existing_node = None
5788                 myeb = None
5789                 usepkgonly = "--usepkgonly" in self.myopts
5790                 empty = "empty" in self.myparams
5791                 selective = "selective" in self.myparams
5792                 reinstall = False
5793                 noreplace = "--noreplace" in self.myopts
5794                 # Behavior of the "selective" parameter depends on
5795                 # whether or not a package matches an argument atom.
5796                 # If an installed package provides an old-style
5797                 # virtual that is no longer provided by an available
5798                 # package, the installed package may match an argument
5799                 # atom even though none of the available packages do.
5800                 # Therefore, "selective" logic does not consider
5801                 # whether or not an installed package matches an
5802                 # argument atom. It only considers whether or not
5803                 # available packages match argument atoms, which is
5804                 # represented by the found_available_arg flag.
5805                 found_available_arg = False
5806                 for find_existing_node in True, False:
5807                         if existing_node:
5808                                 break
5809                         for db, pkg_type, built, installed, db_keys in dbs:
5810                                 if existing_node:
5811                                         break
5812                                 if installed and not find_existing_node:
5813                                         want_reinstall = reinstall or empty or \
5814                                                 (found_available_arg and not selective)
5815                                         if want_reinstall and matched_packages:
5816                                                 continue
5817                                 if hasattr(db, "xmatch"):
5818                                         cpv_list = db.xmatch("match-all", atom)
5819                                 else:
5820                                         cpv_list = db.match(atom)
5821
5822                                 # USE=multislot can make an installed package appear as if
5823                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5824                                 # won't do any good as long as USE=multislot is enabled since
5825                                 # the newly built package still won't have the expected slot.
5826                                 # Therefore, assume that such SLOT dependencies are already
5827                                 # satisfied rather than forcing a rebuild.
5828                                 if installed and not cpv_list and atom.slot:
5829                                         for cpv in db.match(atom.cp):
5830                                                 slot_available = False
5831                                                 for other_db, other_type, other_built, \
5832                                                         other_installed, other_keys in dbs:
5833                                                         try:
5834                                                                 if atom.slot == \
5835                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
5836                                                                         slot_available = True
5837                                                                         break
5838                                                         except KeyError:
5839                                                                 pass
5840                                                 if not slot_available:
5841                                                         continue
5842                                                 inst_pkg = self._pkg(cpv, "installed",
5843                                                         root_config, installed=installed)
5844                                                 # Remove the slot from the atom and verify that
5845                                                 # the package matches the resulting atom.
5846                                                 atom_without_slot = portage.dep.remove_slot(atom)
5847                                                 if atom.use:
5848                                                         atom_without_slot += str(atom.use)
5849                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
5850                                                 if portage.match_from_list(
5851                                                         atom_without_slot, [inst_pkg]):
5852                                                         cpv_list = [inst_pkg.cpv]
5853                                                 break
5854
5855                                 if not cpv_list:
5856                                         continue
5857                                 pkg_status = "merge"
5858                                 if installed or onlydeps:
5859                                         pkg_status = "nomerge"
5860                                 # descending order
5861                                 cpv_list.reverse()
5862                                 for cpv in cpv_list:
5863                                         # Make --noreplace take precedence over --newuse.
5864                                         if not installed and noreplace and \
5865                                                 cpv in vardb.match(atom):
5866                                                 # If the installed version is masked, it may
5867                                                 # be necessary to look at lower versions,
5868                                                 # in case there is a visible downgrade.
5869                                                 continue
5870                                         reinstall_for_flags = None
5871                                         cache_key = (pkg_type, root, cpv, pkg_status)
5872                                         calculated_use = True
5873                                         pkg = self._pkg_cache.get(cache_key)
5874                                         if pkg is None:
5875                                                 calculated_use = False
5876                                                 try:
5877                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5878                                                 except KeyError:
5879                                                         continue
5880                                                 pkg = Package(built=built, cpv=cpv,
5881                                                         installed=installed, metadata=metadata,
5882                                                         onlydeps=onlydeps, root_config=root_config,
5883                                                         type_name=pkg_type)
5884                                                 metadata = pkg.metadata
5885                                                 if not built and ("?" in metadata["LICENSE"] or \
5886                                                         "?" in metadata["PROVIDE"]):
5887                                                         # This is avoided whenever possible because
5888                                                         # it's expensive. It only needs to be done here
5889                                                         # if it has an effect on visibility.
5890                                                         pkgsettings.setcpv(pkg)
5891                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
5892                                                         calculated_use = True
5893                                                 self._pkg_cache[pkg] = pkg
5894
5895                                         if not installed or (built and matched_packages):
5896                                                 # Only enforce visibility on installed packages
5897                                                 # if there is at least one other visible package
5898                                                 # available. By filtering installed masked packages
5899                                                 # here, packages that have been masked since they
5900                                                 # were installed can be automatically downgraded
5901                                                 # to an unmasked version.
5902                                                 try:
5903                                                         if not visible(pkgsettings, pkg):
5904                                                                 continue
5905                                                 except portage.exception.InvalidDependString:
5906                                                         if not installed:
5907                                                                 continue
5908
5909                                                 # Enable upgrade or downgrade to a version
5910                                                 # with visible KEYWORDS when the installed
5911                                                 # version is masked by KEYWORDS, but never
5912                                                 # reinstall the same exact version only due
5913                                                 # to a KEYWORDS mask.
5914                                                 if built and matched_packages:
5915
5916                                                         different_version = None
5917                                                         for avail_pkg in matched_packages:
5918                                                                 if not portage.dep.cpvequal(
5919                                                                         pkg.cpv, avail_pkg.cpv):
5920                                                                         different_version = avail_pkg
5921                                                                         break
5922                                                         if different_version is not None:
5923
5924                                                                 if installed and \
5925                                                                         pkgsettings._getMissingKeywords(
5926                                                                         pkg.cpv, pkg.metadata):
5927                                                                         continue
5928
5929                                                                 # If the ebuild no longer exists or it's
5930                                                                 # keywords have been dropped, reject built
5931                                                                 # instances (installed or binary).
5932                                                                 # If --usepkgonly is enabled, assume that
5933                                                                 # the ebuild status should be ignored.
5934                                                                 if not usepkgonly:
5935                                                                         try:
5936                                                                                 pkg_eb = self._pkg(
5937                                                                                         pkg.cpv, "ebuild", root_config)
5938                                                                         except portage.exception.PackageNotFound:
5939                                                                                 continue
5940                                                                         else:
5941                                                                                 if not visible(pkgsettings, pkg_eb):
5942                                                                                         continue
5943
5944                                         if not pkg.built and not calculated_use:
5945                                                 # This is avoided whenever possible because
5946                                                 # it's expensive.
5947                                                 pkgsettings.setcpv(pkg)
5948                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5949
5950                                         if pkg.cp != atom.cp:
5951                                                 # A cpv can be returned from dbapi.match() as an
5952                                                 # old-style virtual match even in cases when the
5953                                                 # package does not actually PROVIDE the virtual.
5954                                                 # Filter out any such false matches here.
5955                                                 if not atom_set.findAtomForPackage(pkg):
5956                                                         continue
5957
5958                                         myarg = None
5959                                         if root == self.target_root:
5960                                                 try:
5961                                                         # Ebuild USE must have been calculated prior
5962                                                         # to this point, in case atoms have USE deps.
5963                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
5964                                                 except StopIteration:
5965                                                         pass
5966                                                 except portage.exception.InvalidDependString:
5967                                                         if not installed:
5968                                                                 # masked by corruption
5969                                                                 continue
5970                                         if not installed and myarg:
5971                                                 found_available_arg = True
5972
5973                                         if atom.use and not pkg.built:
5974                                                 use = pkg.use.enabled
5975                                                 if atom.use.enabled.difference(use):
5976                                                         continue
5977                                                 if atom.use.disabled.intersection(use):
5978                                                         continue
5979                                         if pkg.cp == atom_cp:
5980                                                 if highest_version is None:
5981                                                         highest_version = pkg
5982                                                 elif pkg > highest_version:
5983                                                         highest_version = pkg
5984                                         # At this point, we've found the highest visible
5985                                         # match from the current repo. Any lower versions
5986                                         # from this repo are ignored, so this so the loop
5987                                         # will always end with a break statement below
5988                                         # this point.
5989                                         if find_existing_node:
5990                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5991                                                 if not e_pkg:
5992                                                         break
5993                                                 if portage.dep.match_from_list(atom, [e_pkg]):
5994                                                         if highest_version and \
5995                                                                 e_pkg.cp == atom_cp and \
5996                                                                 e_pkg < highest_version and \
5997                                                                 e_pkg.slot_atom != highest_version.slot_atom:
5998                                                                 # There is a higher version available in a
5999                                                                 # different slot, so this existing node is
6000                                                                 # irrelevant.
6001                                                                 pass
6002                                                         else:
6003                                                                 matched_packages.append(e_pkg)
6004                                                                 existing_node = e_pkg
6005                                                 break
6006                                         # Compare built package to current config and
6007                                         # reject the built package if necessary.
6008                                         if built and not installed and \
6009                                                 ("--newuse" in self.myopts or \
6010                                                 "--reinstall" in self.myopts):
6011                                                 iuses = pkg.iuse.all
6012                                                 old_use = pkg.use.enabled
6013                                                 if myeb:
6014                                                         pkgsettings.setcpv(myeb)
6015                                                 else:
6016                                                         pkgsettings.setcpv(pkg)
6017                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6018                                                 forced_flags = set()
6019                                                 forced_flags.update(pkgsettings.useforce)
6020                                                 forced_flags.update(pkgsettings.usemask)
6021                                                 cur_iuse = iuses
6022                                                 if myeb and not usepkgonly:
6023                                                         cur_iuse = myeb.iuse.all
6024                                                 if self._reinstall_for_flags(forced_flags,
6025                                                         old_use, iuses,
6026                                                         now_use, cur_iuse):
6027                                                         break
6028                                         # Compare current config to installed package
6029                                         # and do not reinstall if possible.
6030                                         if not installed and \
6031                                                 ("--newuse" in self.myopts or \
6032                                                 "--reinstall" in self.myopts) and \
6033                                                 cpv in vardb.match(atom):
6034                                                 pkgsettings.setcpv(pkg)
6035                                                 forced_flags = set()
6036                                                 forced_flags.update(pkgsettings.useforce)
6037                                                 forced_flags.update(pkgsettings.usemask)
6038                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6039                                                 old_iuse = set(filter_iuse_defaults(
6040                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6041                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6042                                                 cur_iuse = pkg.iuse.all
6043                                                 reinstall_for_flags = \
6044                                                         self._reinstall_for_flags(
6045                                                         forced_flags, old_use, old_iuse,
6046                                                         cur_use, cur_iuse)
6047                                                 if reinstall_for_flags:
6048                                                         reinstall = True
6049                                         if not built:
6050                                                 myeb = pkg
6051                                         matched_packages.append(pkg)
6052                                         if reinstall_for_flags:
6053                                                 self._reinstall_nodes[pkg] = \
6054                                                         reinstall_for_flags
6055                                         break
6056
6057                 if not matched_packages:
6058                         return None, None
6059
6060                 if "--debug" in self.myopts:
6061                         for pkg in matched_packages:
6062                                 portage.writemsg("%s %s\n" % \
6063                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6064
6065                 # Filter out any old-style virtual matches if they are
6066                 # mixed with new-style virtual matches.
6067                 cp = portage.dep_getkey(atom)
6068                 if len(matched_packages) > 1 and \
6069                         "virtual" == portage.catsplit(cp)[0]:
6070                         for pkg in matched_packages:
6071                                 if pkg.cp != cp:
6072                                         continue
6073                                 # Got a new-style virtual, so filter
6074                                 # out any old-style virtuals.
6075                                 matched_packages = [pkg for pkg in matched_packages \
6076                                         if pkg.cp == cp]
6077                                 break
6078
6079                 if len(matched_packages) > 1:
6080                         bestmatch = portage.best(
6081                                 [pkg.cpv for pkg in matched_packages])
6082                         matched_packages = [pkg for pkg in matched_packages \
6083                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6084
6085                 # ordered by type preference ("ebuild" type is the last resort)
6086                 return  matched_packages[-1], existing_node
6087
6088         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6089                 """
6090                 Select packages that have already been added to the graph or
6091                 those that are installed and have not been scheduled for
6092                 replacement.
6093                 """
6094                 graph_db = self._graph_trees[root]["porttree"].dbapi
6095                 matches = graph_db.match(atom)
6096                 if not matches:
6097                         return None, None
6098                 cpv = matches[-1] # highest match
6099                 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6100                         graph_db.aux_get(cpv, ["SLOT"])[0])
6101                 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6102                 if e_pkg:
6103                         return e_pkg, e_pkg
6104                 # Since this cpv exists in the graph_db,
6105                 # we must have a cached Package instance.
6106                 cache_key = ("installed", root, cpv, "nomerge")
6107                 return (self._pkg_cache[cache_key], None)
6108
6109         def _complete_graph(self):
6110                 """
6111                 Add any deep dependencies of required sets (args, system, world) that
6112                 have not been pulled into the graph yet. This ensures that the graph
6113                 is consistent such that initially satisfied deep dependencies are not
6114                 broken in the new graph. Initially unsatisfied dependencies are
6115                 irrelevant since we only want to avoid breaking dependencies that are
6116                 intially satisfied.
6117
6118                 Since this method can consume enough time to disturb users, it is
6119                 currently only enabled by the --complete-graph option.
6120                 """
6121                 if "--buildpkgonly" in self.myopts or \
6122                         "recurse" not in self.myparams:
6123                         return 1
6124
6125                 if "complete" not in self.myparams:
6126                         # Skip this to avoid consuming enough time to disturb users.
6127                         return 1
6128
6129                 # Put the depgraph into a mode that causes it to only
6130                 # select packages that have already been added to the
6131                 # graph or those that are installed and have not been
6132                 # scheduled for replacement. Also, toggle the "deep"
6133                 # parameter so that all dependencies are traversed and
6134                 # accounted for.
6135                 self._select_atoms = self._select_atoms_from_graph
6136                 self._select_package = self._select_pkg_from_graph
6137                 already_deep = "deep" in self.myparams
6138                 if not already_deep:
6139                         self.myparams.add("deep")
6140
6141                 for root in self.roots:
6142                         required_set_names = self._required_set_names.copy()
6143                         if root == self.target_root and \
6144                                 (already_deep or "empty" in self.myparams):
6145                                 required_set_names.difference_update(self._sets)
6146                         if not required_set_names and not self._ignored_deps:
6147                                 continue
6148                         root_config = self.roots[root]
6149                         setconfig = root_config.setconfig
6150                         args = []
6151                         # Reuse existing SetArg instances when available.
6152                         for arg in self.digraph.root_nodes():
6153                                 if not isinstance(arg, SetArg):
6154                                         continue
6155                                 if arg.root_config != root_config:
6156                                         continue
6157                                 if arg.name in required_set_names:
6158                                         args.append(arg)
6159                                         required_set_names.remove(arg.name)
6160                         # Create new SetArg instances only when necessary.
6161                         for s in required_set_names:
6162                                 expanded_set = InternalPackageSet(
6163                                         initial_atoms=setconfig.getSetAtoms(s))
6164                                 atom = SETPREFIX + s
6165                                 args.append(SetArg(arg=atom, set=expanded_set,
6166                                         root_config=root_config))
6167                         vardb = root_config.trees["vartree"].dbapi
6168                         for arg in args:
6169                                 for atom in arg.set:
6170                                         self._dep_stack.append(
6171                                                 Dependency(atom=atom, root=root, parent=arg))
6172                         if self._ignored_deps:
6173                                 self._dep_stack.extend(self._ignored_deps)
6174                                 self._ignored_deps = []
6175                         if not self._create_graph(allow_unsatisfied=True):
6176                                 return 0
6177                         # Check the unsatisfied deps to see if any initially satisfied deps
6178                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6179                         # deps are irrelevant since we only want to avoid breaking deps
6180                         # that are initially satisfied.
6181                         while self._unsatisfied_deps:
6182                                 dep = self._unsatisfied_deps.pop()
6183                                 matches = vardb.match_pkgs(dep.atom)
6184                                 if not matches:
6185                                         self._initially_unsatisfied_deps.append(dep)
6186                                         continue
6187                                 # An scheduled installation broke a deep dependency.
6188                                 # Add the installed package to the graph so that it
6189                                 # will be appropriately reported as a slot collision
6190                                 # (possibly solvable via backtracking).
6191                                 pkg = matches[-1] # highest match
6192                                 if not self._add_pkg(pkg, dep):
6193                                         return 0
6194                                 if not self._create_graph(allow_unsatisfied=True):
6195                                         return 0
6196                 return 1
6197
6198         def _pkg(self, cpv, type_name, root_config, installed=False):
6199                 """
6200                 Get a package instance from the cache, or create a new
6201                 one if necessary. Raises KeyError from aux_get if it
6202                 failures for some reason (package does not exist or is
6203                 corrupt).
6204                 """
6205                 operation = "merge"
6206                 if installed:
6207                         operation = "nomerge"
6208                 pkg = self._pkg_cache.get(
6209                         (type_name, root_config.root, cpv, operation))
6210                 if pkg is None:
6211                         tree_type = self.pkg_tree_map[type_name]
6212                         db = root_config.trees[tree_type].dbapi
6213                         db_keys = list(self._trees_orig[root_config.root][
6214                                 tree_type].dbapi._aux_cache_keys)
6215                         try:
6216                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6217                         except KeyError:
6218                                 raise portage.exception.PackageNotFound(cpv)
6219                         pkg = Package(cpv=cpv, metadata=metadata,
6220                                 root_config=root_config, installed=installed)
6221                         if type_name == "ebuild":
6222                                 settings = self.pkgsettings[root_config.root]
6223                                 settings.setcpv(pkg)
6224                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6225                         self._pkg_cache[pkg] = pkg
6226                 return pkg
6227
6228         def validate_blockers(self):
6229                 """Remove any blockers from the digraph that do not match any of the
6230                 packages within the graph.  If necessary, create hard deps to ensure
6231                 correct merge order such that mutually blocking packages are never
6232                 installed simultaneously."""
6233
6234                 if "--buildpkgonly" in self.myopts or \
6235                         "--nodeps" in self.myopts:
6236                         return True
6237
6238                 #if "deep" in self.myparams:
6239                 if True:
6240                         # Pull in blockers from all installed packages that haven't already
6241                         # been pulled into the depgraph.  This is not enabled by default
6242                         # due to the performance penalty that is incurred by all the
6243                         # additional dep_check calls that are required.
6244
6245                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6246                         for myroot in self.trees:
6247                                 vardb = self.trees[myroot]["vartree"].dbapi
6248                                 portdb = self.trees[myroot]["porttree"].dbapi
6249                                 pkgsettings = self.pkgsettings[myroot]
6250                                 final_db = self.mydbapi[myroot]
6251
6252                                 blocker_cache = BlockerCache(myroot, vardb)
6253                                 stale_cache = set(blocker_cache)
6254                                 for pkg in vardb:
6255                                         cpv = pkg.cpv
6256                                         stale_cache.discard(cpv)
6257                                         pkg_in_graph = self.digraph.contains(pkg)
6258
6259                                         # Check for masked installed packages. Only warn about
6260                                         # packages that are in the graph in order to avoid warning
6261                                         # about those that will be automatically uninstalled during
6262                                         # the merge process or by --depclean.
6263                                         if pkg in final_db:
6264                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6265                                                         self._masked_installed.add(pkg)
6266
6267                                         blocker_atoms = None
6268                                         blockers = None
6269                                         if pkg_in_graph:
6270                                                 blockers = []
6271                                                 try:
6272                                                         blockers.extend(
6273                                                                 self._blocker_parents.child_nodes(pkg))
6274                                                 except KeyError:
6275                                                         pass
6276                                                 try:
6277                                                         blockers.extend(
6278                                                                 self._irrelevant_blockers.child_nodes(pkg))
6279                                                 except KeyError:
6280                                                         pass
6281                                         if blockers is not None:
6282                                                 blockers = set(str(blocker.atom) \
6283                                                         for blocker in blockers)
6284
6285                                         # If this node has any blockers, create a "nomerge"
6286                                         # node for it so that they can be enforced.
6287                                         self.spinner.update()
6288                                         blocker_data = blocker_cache.get(cpv)
6289                                         if blocker_data is not None and \
6290                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6291                                                 blocker_data = None
6292
6293                                         # If blocker data from the graph is available, use
6294                                         # it to validate the cache and update the cache if
6295                                         # it seems invalid.
6296                                         if blocker_data is not None and \
6297                                                 blockers is not None:
6298                                                 if not blockers.symmetric_difference(
6299                                                         blocker_data.atoms):
6300                                                         continue
6301                                                 blocker_data = None
6302
6303                                         if blocker_data is None and \
6304                                                 blockers is not None:
6305                                                 # Re-use the blockers from the graph.
6306                                                 blocker_atoms = sorted(blockers)
6307                                                 counter = long(pkg.metadata["COUNTER"])
6308                                                 blocker_data = \
6309                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6310                                                 blocker_cache[pkg.cpv] = blocker_data
6311                                                 continue
6312
6313                                         if blocker_data:
6314                                                 blocker_atoms = blocker_data.atoms
6315                                         else:
6316                                                 # Use aux_get() to trigger FakeVartree global
6317                                                 # updates on *DEPEND when appropriate.
6318                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6319                                                 # It is crucial to pass in final_db here in order to
6320                                                 # optimize dep_check calls by eliminating atoms via
6321                                                 # dep_wordreduce and dep_eval calls.
6322                                                 try:
6323                                                         portage.dep._dep_check_strict = False
6324                                                         try:
6325                                                                 success, atoms = portage.dep_check(depstr,
6326                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6327                                                                         trees=self._graph_trees, myroot=myroot)
6328                                                         except Exception, e:
6329                                                                 if isinstance(e, SystemExit):
6330                                                                         raise
6331                                                                 # This is helpful, for example, if a ValueError
6332                                                                 # is thrown from cpv_expand due to multiple
6333                                                                 # matches (this can happen if an atom lacks a
6334                                                                 # category).
6335                                                                 show_invalid_depstring_notice(
6336                                                                         pkg, depstr, str(e))
6337                                                                 del e
6338                                                                 raise
6339                                                 finally:
6340                                                         portage.dep._dep_check_strict = True
6341                                                 if not success:
6342                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6343                                                         if replacement_pkg and \
6344                                                                 replacement_pkg[0].operation == "merge":
6345                                                                 # This package is being replaced anyway, so
6346                                                                 # ignore invalid dependencies so as not to
6347                                                                 # annoy the user too much (otherwise they'd be
6348                                                                 # forced to manually unmerge it first).
6349                                                                 continue
6350                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6351                                                         return False
6352                                                 blocker_atoms = [myatom for myatom in atoms \
6353                                                         if myatom.startswith("!")]
6354                                                 blocker_atoms.sort()
6355                                                 counter = long(pkg.metadata["COUNTER"])
6356                                                 blocker_cache[cpv] = \
6357                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6358                                         if blocker_atoms:
6359                                                 try:
6360                                                         for atom in blocker_atoms:
6361                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6362                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6363                                                                 self._blocker_parents.add(blocker, pkg)
6364                                                 except portage.exception.InvalidAtom, e:
6365                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6366                                                         show_invalid_depstring_notice(
6367                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6368                                                         return False
6369                                 for cpv in stale_cache:
6370                                         del blocker_cache[cpv]
6371                                 blocker_cache.flush()
6372                                 del blocker_cache
6373
6374                 # Discard any "uninstall" tasks scheduled by previous calls
6375                 # to this method, since those tasks may not make sense given
6376                 # the current graph state.
6377                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6378                 if previous_uninstall_tasks:
6379                         self._blocker_uninstalls = digraph()
6380                         self.digraph.difference_update(previous_uninstall_tasks)
6381
6382                 for blocker in self._blocker_parents.leaf_nodes():
6383                         self.spinner.update()
6384                         root_config = self.roots[blocker.root]
6385                         virtuals = root_config.settings.getvirtuals()
6386                         myroot = blocker.root
6387                         initial_db = self.trees[myroot]["vartree"].dbapi
6388                         final_db = self.mydbapi[myroot]
6389                         
6390                         provider_virtual = False
6391                         if blocker.cp in virtuals and \
6392                                 not self._have_new_virt(blocker.root, blocker.cp):
6393                                 provider_virtual = True
6394
6395                         if provider_virtual:
6396                                 atoms = []
6397                                 for provider_entry in virtuals[blocker.cp]:
6398                                         provider_cp = \
6399                                                 portage.dep_getkey(provider_entry)
6400                                         atoms.append(blocker.atom.replace(
6401                                                 blocker.cp, provider_cp))
6402                         else:
6403                                 atoms = [blocker.atom]
6404
6405                         blocked_initial = []
6406                         for atom in atoms:
6407                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6408
6409                         blocked_final = []
6410                         for atom in atoms:
6411                                 blocked_final.extend(final_db.match_pkgs(atom))
6412
6413                         if not blocked_initial and not blocked_final:
6414                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6415                                 self._blocker_parents.remove(blocker)
6416                                 # Discard any parents that don't have any more blockers.
6417                                 for pkg in parent_pkgs:
6418                                         self._irrelevant_blockers.add(blocker, pkg)
6419                                         if not self._blocker_parents.child_nodes(pkg):
6420                                                 self._blocker_parents.remove(pkg)
6421                                 continue
6422                         for parent in self._blocker_parents.parent_nodes(blocker):
6423                                 unresolved_blocks = False
6424                                 depends_on_order = set()
6425                                 for pkg in blocked_initial:
6426                                         if pkg.slot_atom == parent.slot_atom:
6427                                                 # TODO: Support blocks within slots in cases where it
6428                                                 # might make sense.  For example, a new version might
6429                                                 # require that the old version be uninstalled at build
6430                                                 # time.
6431                                                 continue
6432                                         if parent.installed:
6433                                                 # Two currently installed packages conflict with
6434                                                 # eachother. Ignore this case since the damage
6435                                                 # is already done and this would be likely to
6436                                                 # confuse users if displayed like a normal blocker.
6437                                                 continue
6438                                         if parent.operation == "merge":
6439                                                 # Maybe the blocked package can be replaced or simply
6440                                                 # unmerged to resolve this block.
6441                                                 depends_on_order.add((pkg, parent))
6442                                                 continue
6443                                         # None of the above blocker resolutions techniques apply,
6444                                         # so apparently this one is unresolvable.
6445                                         unresolved_blocks = True
6446                                 for pkg in blocked_final:
6447                                         if pkg.slot_atom == parent.slot_atom:
6448                                                 # TODO: Support blocks within slots.
6449                                                 continue
6450                                         if parent.operation == "nomerge" and \
6451                                                 pkg.operation == "nomerge":
6452                                                 # This blocker will be handled the next time that a
6453                                                 # merge of either package is triggered.
6454                                                 continue
6455
6456                                         # Maybe the blocking package can be
6457                                         # unmerged to resolve this block.
6458                                         if parent.operation == "merge" and pkg.installed:
6459                                                 depends_on_order.add((pkg, parent))
6460                                                 continue
6461                                         elif parent.operation == "nomerge":
6462                                                 depends_on_order.add((parent, pkg))
6463                                                 continue
6464                                         # None of the above blocker resolutions techniques apply,
6465                                         # so apparently this one is unresolvable.
6466                                         unresolved_blocks = True
6467
6468                                 # Make sure we don't unmerge any package that have been pulled
6469                                 # into the graph.
6470                                 if not unresolved_blocks and depends_on_order:
6471                                         for inst_pkg, inst_task in depends_on_order:
6472                                                 if self.digraph.contains(inst_pkg) and \
6473                                                         self.digraph.parent_nodes(inst_pkg):
6474                                                         unresolved_blocks = True
6475                                                         break
6476
6477                                 if not unresolved_blocks and depends_on_order:
6478                                         for inst_pkg, inst_task in depends_on_order:
6479                                                 uninst_task = Package(built=inst_pkg.built,
6480                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6481                                                         metadata=inst_pkg.metadata,
6482                                                         operation="uninstall",
6483                                                         root_config=inst_pkg.root_config,
6484                                                         type_name=inst_pkg.type_name)
6485                                                 self._pkg_cache[uninst_task] = uninst_task
6486                                                 # Enforce correct merge order with a hard dep.
6487                                                 self.digraph.addnode(uninst_task, inst_task,
6488                                                         priority=BlockerDepPriority.instance)
6489                                                 # Count references to this blocker so that it can be
6490                                                 # invalidated after nodes referencing it have been
6491                                                 # merged.
6492                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6493                                 if not unresolved_blocks and not depends_on_order:
6494                                         self._irrelevant_blockers.add(blocker, parent)
6495                                         self._blocker_parents.remove_edge(blocker, parent)
6496                                         if not self._blocker_parents.parent_nodes(blocker):
6497                                                 self._blocker_parents.remove(blocker)
6498                                         if not self._blocker_parents.child_nodes(parent):
6499                                                 self._blocker_parents.remove(parent)
6500                                 if unresolved_blocks:
6501                                         self._unsolvable_blockers.add(blocker, parent)
6502
6503                 return True
6504
6505         def _accept_blocker_conflicts(self):
6506                 acceptable = False
6507                 for x in ("--buildpkgonly", "--fetchonly",
6508                         "--fetch-all-uri", "--nodeps", "--pretend"):
6509                         if x in self.myopts:
6510                                 acceptable = True
6511                                 break
6512                 return acceptable
6513
6514         def _merge_order_bias(self, mygraph):
6515                 """Order nodes from highest to lowest overall reference count for
6516                 optimal leaf node selection."""
6517                 node_info = {}
6518                 for node in mygraph.order:
6519                         node_info[node] = len(mygraph.parent_nodes(node))
6520                 def cmp_merge_preference(node1, node2):
6521                         return node_info[node2] - node_info[node1]
6522                 mygraph.order.sort(cmp_merge_preference)
6523
6524         def altlist(self, reversed=False):
6525
6526                 while self._serialized_tasks_cache is None:
6527                         self._resolve_conflicts()
6528                         try:
6529                                 self._serialized_tasks_cache, self._scheduler_graph = \
6530                                         self._serialize_tasks()
6531                         except self._serialize_tasks_retry:
6532                                 pass
6533
6534                 retlist = self._serialized_tasks_cache[:]
6535                 if reversed:
6536                         retlist.reverse()
6537                 return retlist
6538
6539         def schedulerGraph(self):
6540                 """
6541                 The scheduler graph is identical to the normal one except that
6542                 uninstall edges are reversed in specific cases that require
6543                 conflicting packages to be temporarily installed simultaneously.
6544                 This is intended for use by the Scheduler in it's parallelization
6545                 logic. It ensures that temporary simultaneous installation of
6546                 conflicting packages is avoided when appropriate (especially for
6547                 !!atom blockers), but allowed in specific cases that require it.
6548
6549                 Note that this method calls break_refs() which alters the state of
6550                 internal Package instances such that this depgraph instance should
6551                 not be used to perform any more calculations.
6552                 """
6553                 if self._scheduler_graph is None:
6554                         self.altlist()
6555                 self.break_refs(self._scheduler_graph.order)
6556                 return self._scheduler_graph
6557
6558         def break_refs(self, nodes):
6559                 """
6560                 Take a mergelist like that returned from self.altlist() and
6561                 break any references that lead back to the depgraph. This is
6562                 useful if you want to hold references to packages without
6563                 also holding the depgraph on the heap.
6564                 """
6565                 for node in nodes:
6566                         if hasattr(node, "root_config"):
6567                                 # The FakeVartree references the _package_cache which
6568                                 # references the depgraph. So that Package instances don't
6569                                 # hold the depgraph and FakeVartree on the heap, replace
6570                                 # the RootConfig that references the FakeVartree with the
6571                                 # original RootConfig instance which references the actual
6572                                 # vartree.
6573                                 node.root_config = \
6574                                         self._trees_orig[node.root_config.root]["root_config"]
6575
6576         def _resolve_conflicts(self):
6577                 if not self._complete_graph():
6578                         raise self._unknown_internal_error()
6579
6580                 if not self.validate_blockers():
6581                         raise self._unknown_internal_error()
6582
6583                 if self._slot_collision_info:
6584                         self._process_slot_conflicts()
6585
6586         def _serialize_tasks(self):
6587
6588                 if "--debug" in self.myopts:
6589                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6590                         self.digraph.debug_print()
6591                         writemsg("\n", noiselevel=-1)
6592
6593                 scheduler_graph = self.digraph.copy()
6594                 mygraph=self.digraph.copy()
6595                 # Prune "nomerge" root nodes if nothing depends on them, since
6596                 # otherwise they slow down merge order calculation. Don't remove
6597                 # non-root nodes since they help optimize merge order in some cases
6598                 # such as revdep-rebuild.
6599                 removed_nodes = set()
6600                 while True:
6601                         for node in mygraph.root_nodes():
6602                                 if not isinstance(node, Package) or \
6603                                         node.installed or node.onlydeps:
6604                                         removed_nodes.add(node)
6605                         if removed_nodes:
6606                                 self.spinner.update()
6607                                 mygraph.difference_update(removed_nodes)
6608                         if not removed_nodes:
6609                                 break
6610                         removed_nodes.clear()
6611                 self._merge_order_bias(mygraph)
6612                 def cmp_circular_bias(n1, n2):
6613                         """
6614                         RDEPEND is stronger than PDEPEND and this function
6615                         measures such a strength bias within a circular
6616                         dependency relationship.
6617                         """
6618                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6619                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6620                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6621                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6622                         if n1_n2_medium == n2_n1_medium:
6623                                 return 0
6624                         elif n1_n2_medium:
6625                                 return 1
6626                         return -1
6627                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6628                 retlist=[]
6629                 # Contains uninstall tasks that have been scheduled to
6630                 # occur after overlapping blockers have been installed.
6631                 scheduled_uninstalls = set()
6632                 # Contains any Uninstall tasks that have been ignored
6633                 # in order to avoid the circular deps code path. These
6634                 # correspond to blocker conflicts that could not be
6635                 # resolved.
6636                 ignored_uninstall_tasks = set()
6637                 have_uninstall_task = False
6638                 complete = "complete" in self.myparams
6639                 myblocker_parents = self._blocker_parents.copy()
6640                 asap_nodes = []
6641
6642                 def get_nodes(**kwargs):
6643                         """
6644                         Returns leaf nodes excluding Uninstall instances
6645                         since those should be executed as late as possible.
6646                         """
6647                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6648                                 if isinstance(node, Package) and \
6649                                         (node.operation != "uninstall" or \
6650                                         node in scheduled_uninstalls)]
6651
6652                 # sys-apps/portage needs special treatment if ROOT="/"
6653                 running_root = self._running_root.root
6654                 from portage.const import PORTAGE_PACKAGE_ATOM
6655                 runtime_deps = InternalPackageSet(
6656                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6657                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6658                         PORTAGE_PACKAGE_ATOM)
6659                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6660                         PORTAGE_PACKAGE_ATOM)
6661
6662                 if running_portage:
6663                         running_portage = running_portage[0]
6664                 else:
6665                         running_portage = None
6666
6667                 if replacement_portage:
6668                         replacement_portage = replacement_portage[0]
6669                 else:
6670                         replacement_portage = None
6671
6672                 if replacement_portage == running_portage:
6673                         replacement_portage = None
6674
6675                 if replacement_portage is not None:
6676                         # update from running_portage to replacement_portage asap
6677                         asap_nodes.append(replacement_portage)
6678
6679                 if running_portage is not None:
6680                         try:
6681                                 portage_rdepend = self._select_atoms_highest_available(
6682                                         running_root, running_portage.metadata["RDEPEND"],
6683                                         myuse=running_portage.use.enabled,
6684                                         parent=running_portage, strict=False)
6685                         except portage.exception.InvalidDependString, e:
6686                                 portage.writemsg("!!! Invalid RDEPEND in " + \
6687                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6688                                         (running_root, running_portage.cpv, e), noiselevel=-1)
6689                                 del e
6690                                 portage_rdepend = []
6691                         runtime_deps.update(atom for atom in portage_rdepend \
6692                                 if not atom.startswith("!"))
6693
6694                 ignore_priority_soft_range = [None]
6695                 ignore_priority_soft_range.extend(
6696                         xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6697                 tree_mode = "--tree" in self.myopts
6698                 # Tracks whether or not the current iteration should prefer asap_nodes
6699                 # if available.  This is set to False when the previous iteration
6700                 # failed to select any nodes.  It is reset whenever nodes are
6701                 # successfully selected.
6702                 prefer_asap = True
6703
6704                 # By default, try to avoid selecting root nodes whenever possible. This
6705                 # helps ensure that the maximimum possible number of soft dependencies
6706                 # have been removed from the graph before their parent nodes have
6707                 # selected. This is especially important when those dependencies are
6708                 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6709                 # CHOST has been changed (like when building a stage3 from a stage2).
6710                 accept_root_node = False
6711
6712                 # State of prefer_asap and accept_root_node flags for successive
6713                 # iterations that loosen the criteria for node selection.
6714                 #
6715                 # iteration   prefer_asap   accept_root_node
6716                 # 1           True          False
6717                 # 2           False         False
6718                 # 3           False         True
6719                 #
6720                 # If no nodes are selected on the 3rd iteration, it is due to
6721                 # unresolved blockers or circular dependencies.
6722
6723                 while not mygraph.empty():
6724                         self.spinner.update()
6725                         selected_nodes = None
6726                         ignore_priority = None
6727                         if prefer_asap and asap_nodes:
6728                                 """ASAP nodes are merged before their soft deps."""
6729                                 asap_nodes = [node for node in asap_nodes \
6730                                         if mygraph.contains(node)]
6731                                 for node in asap_nodes:
6732                                         if not mygraph.child_nodes(node,
6733                                                 ignore_priority=DepPriority.SOFT):
6734                                                 selected_nodes = [node]
6735                                                 asap_nodes.remove(node)
6736                                                 break
6737                         if not selected_nodes and \
6738                                 not (prefer_asap and asap_nodes):
6739                                 for ignore_priority in ignore_priority_soft_range:
6740                                         nodes = get_nodes(ignore_priority=ignore_priority)
6741                                         if nodes:
6742                                                 break
6743                                 if nodes:
6744                                         if ignore_priority is None and not tree_mode:
6745                                                 # Greedily pop all of these nodes since no relationship
6746                                                 # has been ignored.  This optimization destroys --tree
6747                                                 # output, so it's disabled in reversed mode. If there
6748                                                 # is a mix of merge and uninstall nodes, save the
6749                                                 # uninstall nodes from later since sometimes a merge
6750                                                 # node will render an install node unnecessary, and
6751                                                 # we want to avoid doing a separate uninstall task in
6752                                                 # that case.
6753                                                 merge_nodes = [node for node in nodes \
6754                                                         if node.operation == "merge"]
6755                                                 if merge_nodes:
6756                                                         selected_nodes = merge_nodes
6757                                                 else:
6758                                                         selected_nodes = nodes
6759                                         else:
6760                                                 # For optimal merge order:
6761                                                 #  * Only pop one node.
6762                                                 #  * Removing a root node (node without a parent)
6763                                                 #    will not produce a leaf node, so avoid it.
6764                                                 for node in nodes:
6765                                                         if mygraph.parent_nodes(node):
6766                                                                 # found a non-root node
6767                                                                 selected_nodes = [node]
6768                                                                 break
6769                                                 if not selected_nodes and \
6770                                                         (accept_root_node or ignore_priority is None):
6771                                                         # settle for a root node
6772                                                         selected_nodes = [nodes[0]]
6773
6774                         if not selected_nodes:
6775                                 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6776                                 if nodes:
6777                                         """Recursively gather a group of nodes that RDEPEND on
6778                                         eachother.  This ensures that they are merged as a group
6779                                         and get their RDEPENDs satisfied as soon as possible."""
6780                                         def gather_deps(ignore_priority,
6781                                                 mergeable_nodes, selected_nodes, node):
6782                                                 if node in selected_nodes:
6783                                                         return True
6784                                                 if node not in mergeable_nodes:
6785                                                         return False
6786                                                 if node == replacement_portage and \
6787                                                         mygraph.child_nodes(node,
6788                                                         ignore_priority=DepPriority.MEDIUM_SOFT):
6789                                                         # Make sure that portage always has all of it's
6790                                                         # RDEPENDs installed first.
6791                                                         return False
6792                                                 selected_nodes.add(node)
6793                                                 for child in mygraph.child_nodes(node,
6794                                                         ignore_priority=ignore_priority):
6795                                                         if not gather_deps(ignore_priority,
6796                                                                 mergeable_nodes, selected_nodes, child):
6797                                                                 return False
6798                                                 return True
6799                                         mergeable_nodes = set(nodes)
6800                                         if prefer_asap and asap_nodes:
6801                                                 nodes = asap_nodes
6802                                         for ignore_priority in xrange(DepPriority.SOFT,
6803                                                 DepPriority.MEDIUM_SOFT + 1):
6804                                                 for node in nodes:
6805                                                         if nodes is not asap_nodes and \
6806                                                                 not accept_root_node and \
6807                                                                 not mygraph.parent_nodes(node):
6808                                                                 continue
6809                                                         selected_nodes = set()
6810                                                         if gather_deps(ignore_priority,
6811                                                                 mergeable_nodes, selected_nodes, node):
6812                                                                 break
6813                                                         else:
6814                                                                 selected_nodes = None
6815                                                 if selected_nodes:
6816                                                         break
6817
6818                                         # If any nodes have been selected here, it's always
6819                                         # possible that anything up to a MEDIUM_SOFT priority
6820                                         # relationship has been ignored. This state is recorded
6821                                         # in ignore_priority so that relevant nodes will be
6822                                         # added to asap_nodes when appropriate.
6823                                         if selected_nodes:
6824                                                 ignore_priority = DepPriority.MEDIUM_SOFT
6825
6826                                         if prefer_asap and asap_nodes and not selected_nodes:
6827                                                 # We failed to find any asap nodes to merge, so ignore
6828                                                 # them for the next iteration.
6829                                                 prefer_asap = False
6830                                                 continue
6831
6832                                         if not selected_nodes and not accept_root_node:
6833                                                 # Maybe there are only root nodes left, so accept them
6834                                                 # for the next iteration.
6835                                                 accept_root_node = True
6836                                                 continue
6837
6838                         if selected_nodes and ignore_priority > DepPriority.SOFT:
6839                                 # Try to merge ignored medium deps as soon as possible.
6840                                 for node in selected_nodes:
6841                                         children = set(mygraph.child_nodes(node))
6842                                         soft = children.difference(
6843                                                 mygraph.child_nodes(node,
6844                                                 ignore_priority=DepPriority.SOFT))
6845                                         medium_soft = children.difference(
6846                                                 mygraph.child_nodes(node,
6847                                                 ignore_priority=DepPriority.MEDIUM_SOFT))
6848                                         medium_soft.difference_update(soft)
6849                                         for child in medium_soft:
6850                                                 if child in selected_nodes:
6851                                                         continue
6852                                                 if child in asap_nodes:
6853                                                         continue
6854                                                 asap_nodes.append(child)
6855
6856                         if selected_nodes and len(selected_nodes) > 1:
6857                                 if not isinstance(selected_nodes, list):
6858                                         selected_nodes = list(selected_nodes)
6859                                 selected_nodes.sort(cmp_circular_bias)
6860
6861                         if not selected_nodes and not myblocker_uninstalls.is_empty():
6862                                 # An Uninstall task needs to be executed in order to
6863                                 # avoid conflict if possible.
6864                                 min_parent_deps = None
6865                                 uninst_task = None
6866                                 for task in myblocker_uninstalls.leaf_nodes():
6867                                         # Do some sanity checks so that system or world packages
6868                                         # don't get uninstalled inappropriately here (only really
6869                                         # necessary when --complete-graph has not been enabled).
6870
6871                                         if task in ignored_uninstall_tasks:
6872                                                 continue
6873
6874                                         if task in scheduled_uninstalls:
6875                                                 # It's been scheduled but it hasn't
6876                                                 # been executed yet due to dependence
6877                                                 # on installation of blocking packages.
6878                                                 continue
6879
6880                                         root_config = self.roots[task.root]
6881                                         inst_pkg = self._pkg_cache[
6882                                                 ("installed", task.root, task.cpv, "nomerge")]
6883
6884                                         if self.digraph.contains(inst_pkg):
6885                                                 continue
6886
6887                                         forbid_overlap = False
6888                                         heuristic_overlap = False
6889                                         for blocker in myblocker_uninstalls.parent_nodes(task):
6890                                                 if blocker.eapi in ("0", "1"):
6891                                                         heuristic_overlap = True
6892                                                 elif blocker.atom.blocker.overlap.forbid:
6893                                                         forbid_overlap = True
6894                                                         break
6895                                         if forbid_overlap and running_root == task.root:
6896                                                 continue
6897
6898                                         if heuristic_overlap and running_root == task.root:
6899                                                 # Never uninstall sys-apps/portage or it's essential
6900                                                 # dependencies, except through replacement.
6901                                                 try:
6902                                                         runtime_dep_atoms = \
6903                                                                 list(runtime_deps.iterAtomsForPackage(task))
6904                                                 except portage.exception.InvalidDependString, e:
6905                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6906                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6907                                                                 (task.root, task.cpv, e), noiselevel=-1)
6908                                                         del e
6909                                                         continue
6910
6911                                                 # Don't uninstall a runtime dep if it appears
6912                                                 # to be the only suitable one installed.
6913                                                 skip = False
6914                                                 vardb = root_config.trees["vartree"].dbapi
6915                                                 for atom in runtime_dep_atoms:
6916                                                         other_version = None
6917                                                         for pkg in vardb.match_pkgs(atom):
6918                                                                 if pkg.cpv == task.cpv and \
6919                                                                         pkg.metadata["COUNTER"] == \
6920                                                                         task.metadata["COUNTER"]:
6921                                                                         continue
6922                                                                 other_version = pkg
6923                                                                 break
6924                                                         if other_version is None:
6925                                                                 skip = True
6926                                                                 break
6927                                                 if skip:
6928                                                         continue
6929
6930                                                 # For packages in the system set, don't take
6931                                                 # any chances. If the conflict can't be resolved
6932                                                 # by a normal replacement operation then abort.
6933                                                 skip = False
6934                                                 try:
6935                                                         for atom in root_config.sets[
6936                                                                 "system"].iterAtomsForPackage(task):
6937                                                                 skip = True
6938                                                                 break
6939                                                 except portage.exception.InvalidDependString, e:
6940                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6941                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6942                                                                 (task.root, task.cpv, e), noiselevel=-1)
6943                                                         del e
6944                                                         skip = True
6945                                                 if skip:
6946                                                         continue
6947
6948                                         # Note that the world check isn't always
6949                                         # necessary since self._complete_graph() will
6950                                         # add all packages from the system and world sets to the
6951                                         # graph. This just allows unresolved conflicts to be
6952                                         # detected as early as possible, which makes it possible
6953                                         # to avoid calling self._complete_graph() when it is
6954                                         # unnecessary due to blockers triggering an abortion.
6955                                         if not complete:
6956                                                 # For packages in the world set, go ahead an uninstall
6957                                                 # when necessary, as long as the atom will be satisfied
6958                                                 # in the final state.
6959                                                 graph_db = self.mydbapi[task.root]
6960                                                 skip = False
6961                                                 try:
6962                                                         for atom in root_config.sets[
6963                                                                 "world"].iterAtomsForPackage(task):
6964                                                                 satisfied = False
6965                                                                 for pkg in graph_db.match_pkgs(atom):
6966                                                                         if pkg == inst_pkg:
6967                                                                                 continue
6968                                                                         satisfied = True
6969                                                                         break
6970                                                                 if not satisfied:
6971                                                                         skip = True
6972                                                                         break
6973                                                 except portage.exception.InvalidDependString, e:
6974                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6975                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6976                                                                 (task.root, task.cpv, e), noiselevel=-1)
6977                                                         del e
6978                                                         skip = True
6979                                                 if skip:
6980                                                         continue
6981
6982                                         # Check the deps of parent nodes to ensure that
6983                                         # the chosen task produces a leaf node. Maybe
6984                                         # this can be optimized some more to make the
6985                                         # best possible choice, but the current algorithm
6986                                         # is simple and should be near optimal for most
6987                                         # common cases.
6988                                         parent_deps = set()
6989                                         for parent in mygraph.parent_nodes(task):
6990                                                 parent_deps.update(mygraph.child_nodes(parent,
6991                                                         ignore_priority=DepPriority.MEDIUM_SOFT))
6992                                         parent_deps.remove(task)
6993                                         if min_parent_deps is None or \
6994                                                 len(parent_deps) < min_parent_deps:
6995                                                 min_parent_deps = len(parent_deps)
6996                                                 uninst_task = task
6997
6998                                 if uninst_task is not None:
6999                                         # The uninstall is performed only after blocking
7000                                         # packages have been merged on top of it. File
7001                                         # collisions between blocking packages are detected
7002                                         # and removed from the list of files to be uninstalled.
7003                                         scheduled_uninstalls.add(uninst_task)
7004                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7005
7006                                         # Reverse the parent -> uninstall edges since we want
7007                                         # to do the uninstall after blocking packages have
7008                                         # been merged on top of it.
7009                                         mygraph.remove(uninst_task)
7010                                         for blocked_pkg in parent_nodes:
7011                                                 mygraph.add(blocked_pkg, uninst_task,
7012                                                         priority=BlockerDepPriority.instance)
7013                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7014                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7015                                                         priority=BlockerDepPriority.instance)
7016
7017                                 else:
7018                                         # None of the Uninstall tasks are acceptable, so
7019                                         # the corresponding blockers are unresolvable.
7020                                         # We need to drop an Uninstall task here in order
7021                                         # to avoid the circular deps code path, but the
7022                                         # blocker will still be counted as an unresolved
7023                                         # conflict.
7024                                         for node in myblocker_uninstalls.leaf_nodes():
7025                                                 try:
7026                                                         mygraph.remove(node)
7027                                                 except KeyError:
7028                                                         pass
7029                                                 else:
7030                                                         uninst_task = node
7031                                                         ignored_uninstall_tasks.add(node)
7032                                                         break
7033
7034                                 if uninst_task is not None:
7035                                         # After dropping an Uninstall task, reset
7036                                         # the state variables for leaf node selection and
7037                                         # continue trying to select leaf nodes.
7038                                         prefer_asap = True
7039                                         accept_root_node = False
7040                                         continue
7041
7042                         if not selected_nodes:
7043                                 self._circular_deps_for_display = mygraph
7044                                 raise self._unknown_internal_error()
7045
7046                         # At this point, we've succeeded in selecting one or more nodes, so
7047                         # it's now safe to reset the prefer_asap and accept_root_node flags
7048                         # to their default states.
7049                         prefer_asap = True
7050                         accept_root_node = False
7051
7052                         mygraph.difference_update(selected_nodes)
7053
7054                         for node in selected_nodes:
7055                                 if isinstance(node, Package) and \
7056                                         node.operation == "nomerge":
7057                                         continue
7058
7059                                 # Handle interactions between blockers
7060                                 # and uninstallation tasks.
7061                                 solved_blockers = set()
7062                                 uninst_task = None
7063                                 if isinstance(node, Package) and \
7064                                         "uninstall" == node.operation:
7065                                         have_uninstall_task = True
7066                                         uninst_task = node
7067                                 else:
7068                                         vardb = self.trees[node.root]["vartree"].dbapi
7069                                         previous_cpv = vardb.match(node.slot_atom)
7070                                         if previous_cpv:
7071                                                 # The package will be replaced by this one, so remove
7072                                                 # the corresponding Uninstall task if necessary.
7073                                                 previous_cpv = previous_cpv[0]
7074                                                 uninst_task = \
7075                                                         ("installed", node.root, previous_cpv, "uninstall")
7076                                                 try:
7077                                                         mygraph.remove(uninst_task)
7078                                                 except KeyError:
7079                                                         pass
7080
7081                                 if uninst_task is not None and \
7082                                         uninst_task not in ignored_uninstall_tasks and \
7083                                         myblocker_uninstalls.contains(uninst_task):
7084                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7085                                         myblocker_uninstalls.remove(uninst_task)
7086                                         # Discard any blockers that this Uninstall solves.
7087                                         for blocker in blocker_nodes:
7088                                                 if not myblocker_uninstalls.child_nodes(blocker):
7089                                                         myblocker_uninstalls.remove(blocker)
7090                                                         solved_blockers.add(blocker)
7091
7092                                 retlist.append(node)
7093
7094                                 if (isinstance(node, Package) and \
7095                                         "uninstall" == node.operation) or \
7096                                         (uninst_task is not None and \
7097                                         uninst_task in scheduled_uninstalls):
7098                                         # Include satisfied blockers in the merge list
7099                                         # since the user might be interested and also
7100                                         # it serves as an indicator that blocking packages
7101                                         # will be temporarily installed simultaneously.
7102                                         for blocker in solved_blockers:
7103                                                 retlist.append(Blocker(atom=blocker.atom,
7104                                                         root=blocker.root, eapi=blocker.eapi,
7105                                                         satisfied=True))
7106
7107                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7108                 for node in myblocker_uninstalls.root_nodes():
7109                         unsolvable_blockers.add(node)
7110
7111                 for blocker in unsolvable_blockers:
7112                         retlist.append(blocker)
7113
7114                 # If any Uninstall tasks need to be executed in order
7115                 # to avoid a conflict, complete the graph with any
7116                 # dependencies that may have been initially
7117                 # neglected (to ensure that unsafe Uninstall tasks
7118                 # are properly identified and blocked from execution).
7119                 if have_uninstall_task and \
7120                         not complete and \
7121                         not unsolvable_blockers:
7122                         self.myparams.add("complete")
7123                         raise self._serialize_tasks_retry("")
7124
7125                 if unsolvable_blockers and \
7126                         not self._accept_blocker_conflicts():
7127                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7128                         self._serialized_tasks_cache = retlist[:]
7129                         self._scheduler_graph = scheduler_graph
7130                         raise self._unknown_internal_error()
7131
7132                 if self._slot_collision_info and \
7133                         not self._accept_blocker_conflicts():
7134                         self._serialized_tasks_cache = retlist[:]
7135                         self._scheduler_graph = scheduler_graph
7136                         raise self._unknown_internal_error()
7137
7138                 return retlist, scheduler_graph
7139
7140         def _show_circular_deps(self, mygraph):
7141                 # No leaf nodes are available, so we have a circular
7142                 # dependency panic situation.  Reduce the noise level to a
7143                 # minimum via repeated elimination of root nodes since they
7144                 # have no parents and thus can not be part of a cycle.
7145                 while True:
7146                         root_nodes = mygraph.root_nodes(
7147                                 ignore_priority=DepPriority.MEDIUM_SOFT)
7148                         if not root_nodes:
7149                                 break
7150                         mygraph.difference_update(root_nodes)
7151                 # Display the USE flags that are enabled on nodes that are part
7152                 # of dependency cycles in case that helps the user decide to
7153                 # disable some of them.
7154                 display_order = []
7155                 tempgraph = mygraph.copy()
7156                 while not tempgraph.empty():
7157                         nodes = tempgraph.leaf_nodes()
7158                         if not nodes:
7159                                 node = tempgraph.order[0]
7160                         else:
7161                                 node = nodes[0]
7162                         display_order.append(node)
7163                         tempgraph.remove(node)
7164                 display_order.reverse()
7165                 self.myopts.pop("--quiet", None)
7166                 self.myopts.pop("--verbose", None)
7167                 self.myopts["--tree"] = True
7168                 portage.writemsg("\n\n", noiselevel=-1)
7169                 self.display(display_order)
7170                 prefix = colorize("BAD", " * ")
7171                 portage.writemsg("\n", noiselevel=-1)
7172                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7173                         noiselevel=-1)
7174                 portage.writemsg("\n", noiselevel=-1)
7175                 mygraph.debug_print()
7176                 portage.writemsg("\n", noiselevel=-1)
7177                 portage.writemsg(prefix + "Note that circular dependencies " + \
7178                         "can often be avoided by temporarily\n", noiselevel=-1)
7179                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7180                         "optional dependencies.\n", noiselevel=-1)
7181
7182         def _show_merge_list(self):
7183                 if self._serialized_tasks_cache is not None and \
7184                         not (self._displayed_list and \
7185                         (self._displayed_list == self._serialized_tasks_cache or \
7186                         self._displayed_list == \
7187                                 list(reversed(self._serialized_tasks_cache)))):
7188                         display_list = self._serialized_tasks_cache[:]
7189                         if "--tree" in self.myopts:
7190                                 display_list.reverse()
7191                         self.display(display_list)
7192
7193         def _show_unsatisfied_blockers(self, blockers):
7194                 self._show_merge_list()
7195                 msg = "Error: The above package list contains " + \
7196                         "packages which cannot be installed " + \
7197                         "at the same time on the same system."
7198                 prefix = colorize("BAD", " * ")
7199                 from textwrap import wrap
7200                 portage.writemsg("\n", noiselevel=-1)
7201                 for line in wrap(msg, 70):
7202                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7203                 if "--quiet" not in self.myopts:
7204                         show_blocker_docs_link()
7205
7206         def display(self, mylist, favorites=[], verbosity=None):
7207
7208                 # This is used to prevent display_problems() from
7209                 # redundantly displaying this exact same merge list
7210                 # again via _show_merge_list().
7211                 self._displayed_list = mylist
7212
7213                 if verbosity is None:
7214                         verbosity = ("--quiet" in self.myopts and 1 or \
7215                                 "--verbose" in self.myopts and 3 or 2)
7216                 favorites_set = InternalPackageSet(favorites)
7217                 oneshot = "--oneshot" in self.myopts or \
7218                         "--onlydeps" in self.myopts
7219                 columns = "--columns" in self.myopts
7220                 changelogs=[]
7221                 p=[]
7222                 blockers = []
7223
7224                 counters = PackageCounters()
7225
7226                 if verbosity == 1 and "--verbose" not in self.myopts:
7227                         def create_use_string(*args):
7228                                 return ""
7229                 else:
7230                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7231                                 old_iuse, old_use,
7232                                 is_new, reinst_flags,
7233                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7234                                 alphabetical=("--alphabetical" in self.myopts)):
7235                                 enabled = []
7236                                 if alphabetical:
7237                                         disabled = enabled
7238                                         removed = enabled
7239                                 else:
7240                                         disabled = []
7241                                         removed = []
7242                                 cur_iuse = set(cur_iuse)
7243                                 enabled_flags = cur_iuse.intersection(cur_use)
7244                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7245                                 any_iuse = cur_iuse.union(old_iuse)
7246                                 any_iuse = list(any_iuse)
7247                                 any_iuse.sort()
7248                                 for flag in any_iuse:
7249                                         flag_str = None
7250                                         isEnabled = False
7251                                         reinst_flag = reinst_flags and flag in reinst_flags
7252                                         if flag in enabled_flags:
7253                                                 isEnabled = True
7254                                                 if is_new or flag in old_use and \
7255                                                         (all_flags or reinst_flag):
7256                                                         flag_str = red(flag)
7257                                                 elif flag not in old_iuse:
7258                                                         flag_str = yellow(flag) + "%*"
7259                                                 elif flag not in old_use:
7260                                                         flag_str = green(flag) + "*"
7261                                         elif flag in removed_iuse:
7262                                                 if all_flags or reinst_flag:
7263                                                         flag_str = yellow("-" + flag) + "%"
7264                                                         if flag in old_use:
7265                                                                 flag_str += "*"
7266                                                         flag_str = "(" + flag_str + ")"
7267                                                         removed.append(flag_str)
7268                                                 continue
7269                                         else:
7270                                                 if is_new or flag in old_iuse and \
7271                                                         flag not in old_use and \
7272                                                         (all_flags or reinst_flag):
7273                                                         flag_str = blue("-" + flag)
7274                                                 elif flag not in old_iuse:
7275                                                         flag_str = yellow("-" + flag)
7276                                                         if flag not in iuse_forced:
7277                                                                 flag_str += "%"
7278                                                 elif flag in old_use:
7279                                                         flag_str = green("-" + flag) + "*"
7280                                         if flag_str:
7281                                                 if flag in iuse_forced:
7282                                                         flag_str = "(" + flag_str + ")"
7283                                                 if isEnabled:
7284                                                         enabled.append(flag_str)
7285                                                 else:
7286                                                         disabled.append(flag_str)
7287
7288                                 if alphabetical:
7289                                         ret = " ".join(enabled)
7290                                 else:
7291                                         ret = " ".join(enabled + disabled + removed)
7292                                 if ret:
7293                                         ret = '%s="%s" ' % (name, ret)
7294                                 return ret
7295
7296                 repo_display = RepoDisplay(self.roots)
7297
7298                 tree_nodes = []
7299                 display_list = []
7300                 mygraph = self.digraph.copy()
7301
7302                 # If there are any Uninstall instances, add the corresponding
7303                 # blockers to the digraph (useful for --tree display).
7304
7305                 executed_uninstalls = set(node for node in mylist \
7306                         if isinstance(node, Package) and node.operation == "unmerge")
7307
7308                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7309                         uninstall_parents = \
7310                                 self._blocker_uninstalls.parent_nodes(uninstall)
7311                         if not uninstall_parents:
7312                                 continue
7313
7314                         # Remove the corresponding "nomerge" node and substitute
7315                         # the Uninstall node.
7316                         inst_pkg = self._pkg_cache[
7317                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7318                         try:
7319                                 mygraph.remove(inst_pkg)
7320                         except KeyError:
7321                                 pass
7322
7323                         try:
7324                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7325                         except KeyError:
7326                                 inst_pkg_blockers = []
7327
7328                         # Break the Package -> Uninstall edges.
7329                         mygraph.remove(uninstall)
7330
7331                         # Resolution of a package's blockers
7332                         # depend on it's own uninstallation.
7333                         for blocker in inst_pkg_blockers:
7334                                 mygraph.add(uninstall, blocker)
7335
7336                         # Expand Package -> Uninstall edges into
7337                         # Package -> Blocker -> Uninstall edges.
7338                         for blocker in uninstall_parents:
7339                                 mygraph.add(uninstall, blocker)
7340                                 for parent in self._blocker_parents.parent_nodes(blocker):
7341                                         if parent != inst_pkg:
7342                                                 mygraph.add(blocker, parent)
7343
7344                         # If the uninstall task did not need to be executed because
7345                         # of an upgrade, display Blocker -> Upgrade edges since the
7346                         # corresponding Blocker -> Uninstall edges will not be shown.
7347                         upgrade_node = \
7348                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7349                         if upgrade_node is not None and \
7350                                 uninstall not in executed_uninstalls:
7351                                 for blocker in uninstall_parents:
7352                                         mygraph.add(upgrade_node, blocker)
7353
7354                 unsatisfied_blockers = []
7355                 i = 0
7356                 depth = 0
7357                 shown_edges = set()
7358                 for x in mylist:
7359                         if isinstance(x, Blocker) and not x.satisfied:
7360                                 unsatisfied_blockers.append(x)
7361                                 continue
7362                         graph_key = x
7363                         if "--tree" in self.myopts:
7364                                 depth = len(tree_nodes)
7365                                 while depth and graph_key not in \
7366                                         mygraph.child_nodes(tree_nodes[depth-1]):
7367                                                 depth -= 1
7368                                 if depth:
7369                                         tree_nodes = tree_nodes[:depth]
7370                                         tree_nodes.append(graph_key)
7371                                         display_list.append((x, depth, True))
7372                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7373                                 else:
7374                                         traversed_nodes = set() # prevent endless circles
7375                                         traversed_nodes.add(graph_key)
7376                                         def add_parents(current_node, ordered):
7377                                                 parent_nodes = None
7378                                                 # Do not traverse to parents if this node is an
7379                                                 # an argument or a direct member of a set that has
7380                                                 # been specified as an argument (system or world).
7381                                                 if current_node not in self._set_nodes:
7382                                                         parent_nodes = mygraph.parent_nodes(current_node)
7383                                                 if parent_nodes:
7384                                                         child_nodes = set(mygraph.child_nodes(current_node))
7385                                                         selected_parent = None
7386                                                         # First, try to avoid a direct cycle.
7387                                                         for node in parent_nodes:
7388                                                                 if not isinstance(node, (Blocker, Package)):
7389                                                                         continue
7390                                                                 if node not in traversed_nodes and \
7391                                                                         node not in child_nodes:
7392                                                                         edge = (current_node, node)
7393                                                                         if edge in shown_edges:
7394                                                                                 continue
7395                                                                         selected_parent = node
7396                                                                         break
7397                                                         if not selected_parent:
7398                                                                 # A direct cycle is unavoidable.
7399                                                                 for node in parent_nodes:
7400                                                                         if not isinstance(node, (Blocker, Package)):
7401                                                                                 continue
7402                                                                         if node not in traversed_nodes:
7403                                                                                 edge = (current_node, node)
7404                                                                                 if edge in shown_edges:
7405                                                                                         continue
7406                                                                                 selected_parent = node
7407                                                                                 break
7408                                                         if selected_parent:
7409                                                                 shown_edges.add((current_node, selected_parent))
7410                                                                 traversed_nodes.add(selected_parent)
7411                                                                 add_parents(selected_parent, False)
7412                                                 display_list.append((current_node,
7413                                                         len(tree_nodes), ordered))
7414                                                 tree_nodes.append(current_node)
7415                                         tree_nodes = []
7416                                         add_parents(graph_key, True)
7417                         else:
7418                                 display_list.append((x, depth, True))
7419                 mylist = display_list
7420                 for x in unsatisfied_blockers:
7421                         mylist.append((x, 0, True))
7422
7423                 last_merge_depth = 0
7424                 for i in xrange(len(mylist)-1,-1,-1):
7425                         graph_key, depth, ordered = mylist[i]
7426                         if not ordered and depth == 0 and i > 0 \
7427                                 and graph_key == mylist[i-1][0] and \
7428                                 mylist[i-1][1] == 0:
7429                                 # An ordered node got a consecutive duplicate when the tree was
7430                                 # being filled in.
7431                                 del mylist[i]
7432                                 continue
7433                         if ordered and graph_key[-1] != "nomerge":
7434                                 last_merge_depth = depth
7435                                 continue
7436                         if depth >= last_merge_depth or \
7437                                 i < len(mylist) - 1 and \
7438                                 depth >= mylist[i+1][1]:
7439                                         del mylist[i]
7440
7441                 from portage import flatten
7442                 from portage.dep import use_reduce, paren_reduce
7443                 # files to fetch list - avoids counting a same file twice
7444                 # in size display (verbose mode)
7445                 myfetchlist=[]
7446
7447                 # Use this set to detect when all the "repoadd" strings are "[0]"
7448                 # and disable the entire repo display in this case.
7449                 repoadd_set = set()
7450
7451                 for mylist_index in xrange(len(mylist)):
7452                         x, depth, ordered = mylist[mylist_index]
7453                         pkg_type = x[0]
7454                         myroot = x[1]
7455                         pkg_key = x[2]
7456                         portdb = self.trees[myroot]["porttree"].dbapi
7457                         bindb  = self.trees[myroot]["bintree"].dbapi
7458                         vardb = self.trees[myroot]["vartree"].dbapi
7459                         vartree = self.trees[myroot]["vartree"]
7460                         pkgsettings = self.pkgsettings[myroot]
7461
7462                         fetch=" "
7463                         indent = " " * depth
7464
7465                         if isinstance(x, Blocker):
7466                                 if x.satisfied:
7467                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7468                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7469                                 else:
7470                                         blocker_style = "PKG_BLOCKER"
7471                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7472                                 if ordered:
7473                                         counters.blocks += 1
7474                                         if x.satisfied:
7475                                                 counters.blocks_satisfied += 1
7476                                 resolved = portage.key_expand(
7477                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7478                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7479                                         addl += " " + colorize(blocker_style, resolved)
7480                                 else:
7481                                         addl = "[%s %s] %s%s" % \
7482                                                 (colorize(blocker_style, "blocks"),
7483                                                 addl, indent, colorize(blocker_style, resolved))
7484                                 block_parents = self._blocker_parents.parent_nodes(x)
7485                                 block_parents = set([pnode[2] for pnode in block_parents])
7486                                 block_parents = ", ".join(block_parents)
7487                                 if resolved!=x[2]:
7488                                         addl += colorize(blocker_style,
7489                                                 " (\"%s\" is blocking %s)") % \
7490                                                 (str(x.atom).lstrip("!"), block_parents)
7491                                 else:
7492                                         addl += colorize(blocker_style,
7493                                                 " (is blocking %s)") % block_parents
7494                                 if isinstance(x, Blocker) and x.satisfied:
7495                                         if columns:
7496                                                 continue
7497                                         p.append(addl)
7498                                 else:
7499                                         blockers.append(addl)
7500                         else:
7501                                 pkg_status = x[3]
7502                                 pkg_merge = ordered and pkg_status == "merge"
7503                                 if not pkg_merge and pkg_status == "merge":
7504                                         pkg_status = "nomerge"
7505                                 built = pkg_type != "ebuild"
7506                                 installed = pkg_type == "installed"
7507                                 pkg = x
7508                                 metadata = pkg.metadata
7509                                 ebuild_path = None
7510                                 repo_name = metadata["repository"]
7511                                 if pkg_type == "ebuild":
7512                                         ebuild_path = portdb.findname(pkg_key)
7513                                         if not ebuild_path: # shouldn't happen
7514                                                 raise portage.exception.PackageNotFound(pkg_key)
7515                                         repo_path_real = os.path.dirname(os.path.dirname(
7516                                                 os.path.dirname(ebuild_path)))
7517                                 else:
7518                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7519                                 pkg_use = list(pkg.use.enabled)
7520                                 try:
7521                                         restrict = flatten(use_reduce(paren_reduce(
7522                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7523                                 except portage.exception.InvalidDependString, e:
7524                                         if not pkg.installed:
7525                                                 show_invalid_depstring_notice(x,
7526                                                         pkg.metadata["RESTRICT"], str(e))
7527                                                 del e
7528                                                 return 1
7529                                         restrict = []
7530                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7531                                         "fetch" in restrict:
7532                                         fetch = red("F")
7533                                         if ordered:
7534                                                 counters.restrict_fetch += 1
7535                                         if portdb.fetch_check(pkg_key, pkg_use):
7536                                                 fetch = green("f")
7537                                                 if ordered:
7538                                                         counters.restrict_fetch_satisfied += 1
7539
7540                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7541                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
7542                                 myoldbest = []
7543                                 myinslotlist = None
7544                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7545                                 if vardb.cpv_exists(pkg_key):
7546                                         addl="  "+yellow("R")+fetch+"  "
7547                                         if ordered:
7548                                                 if pkg_merge:
7549                                                         counters.reinst += 1
7550                                                 elif pkg_status == "uninstall":
7551                                                         counters.uninst += 1
7552                                 # filter out old-style virtual matches
7553                                 elif installed_versions and \
7554                                         portage.cpv_getkey(installed_versions[0]) == \
7555                                         portage.cpv_getkey(pkg_key):
7556                                         myinslotlist = vardb.match(pkg.slot_atom)
7557                                         # If this is the first install of a new-style virtual, we
7558                                         # need to filter out old-style virtual matches.
7559                                         if myinslotlist and \
7560                                                 portage.cpv_getkey(myinslotlist[0]) != \
7561                                                 portage.cpv_getkey(pkg_key):
7562                                                 myinslotlist = None
7563                                         if myinslotlist:
7564                                                 myoldbest = myinslotlist[:]
7565                                                 addl = "   " + fetch
7566                                                 if not portage.dep.cpvequal(pkg_key,
7567                                                         portage.best([pkg_key] + myoldbest)):
7568                                                         # Downgrade in slot
7569                                                         addl += turquoise("U")+blue("D")
7570                                                         if ordered:
7571                                                                 counters.downgrades += 1
7572                                                 else:
7573                                                         # Update in slot
7574                                                         addl += turquoise("U") + " "
7575                                                         if ordered:
7576                                                                 counters.upgrades += 1
7577                                         else:
7578                                                 # New slot, mark it new.
7579                                                 addl = " " + green("NS") + fetch + "  "
7580                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7581                                                 if ordered:
7582                                                         counters.newslot += 1
7583
7584                                         if "--changelog" in self.myopts:
7585                                                 inst_matches = vardb.match(pkg.slot_atom)
7586                                                 if inst_matches:
7587                                                         changelogs.extend(self.calc_changelog(
7588                                                                 portdb.findname(pkg_key),
7589                                                                 inst_matches[0], pkg_key))
7590                                 else:
7591                                         addl = " " + green("N") + " " + fetch + "  "
7592                                         if ordered:
7593                                                 counters.new += 1
7594
7595                                 verboseadd = ""
7596                                 repoadd = None
7597
7598                                 if True:
7599                                         # USE flag display
7600                                         forced_flags = set()
7601                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7602                                         forced_flags.update(pkgsettings.useforce)
7603                                         forced_flags.update(pkgsettings.usemask)
7604
7605                                         cur_use = [flag for flag in pkg.use.enabled \
7606                                                 if flag in pkg.iuse.all]
7607                                         cur_iuse = sorted(pkg.iuse.all)
7608
7609                                         if myoldbest and myinslotlist:
7610                                                 previous_cpv = myoldbest[0]
7611                                         else:
7612                                                 previous_cpv = pkg.cpv
7613                                         if vardb.cpv_exists(previous_cpv):
7614                                                 old_iuse, old_use = vardb.aux_get(
7615                                                                 previous_cpv, ["IUSE", "USE"])
7616                                                 old_iuse = list(set(
7617                                                         filter_iuse_defaults(old_iuse.split())))
7618                                                 old_iuse.sort()
7619                                                 old_use = old_use.split()
7620                                                 is_new = False
7621                                         else:
7622                                                 old_iuse = []
7623                                                 old_use = []
7624                                                 is_new = True
7625
7626                                         old_use = [flag for flag in old_use if flag in old_iuse]
7627
7628                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
7629                                         use_expand.sort()
7630                                         use_expand.reverse()
7631                                         use_expand_hidden = \
7632                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7633
7634                                         def map_to_use_expand(myvals, forcedFlags=False,
7635                                                 removeHidden=True):
7636                                                 ret = {}
7637                                                 forced = {}
7638                                                 for exp in use_expand:
7639                                                         ret[exp] = []
7640                                                         forced[exp] = set()
7641                                                         for val in myvals[:]:
7642                                                                 if val.startswith(exp.lower()+"_"):
7643                                                                         if val in forced_flags:
7644                                                                                 forced[exp].add(val[len(exp)+1:])
7645                                                                         ret[exp].append(val[len(exp)+1:])
7646                                                                         myvals.remove(val)
7647                                                 ret["USE"] = myvals
7648                                                 forced["USE"] = [val for val in myvals \
7649                                                         if val in forced_flags]
7650                                                 if removeHidden:
7651                                                         for exp in use_expand_hidden:
7652                                                                 ret.pop(exp, None)
7653                                                 if forcedFlags:
7654                                                         return ret, forced
7655                                                 return ret
7656
7657                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7658                                         # are the only thing that triggered reinstallation.
7659                                         reinst_flags_map = {}
7660                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
7661                                         reinst_expand_map = None
7662                                         if reinstall_for_flags:
7663                                                 reinst_flags_map = map_to_use_expand(
7664                                                         list(reinstall_for_flags), removeHidden=False)
7665                                                 for k in list(reinst_flags_map):
7666                                                         if not reinst_flags_map[k]:
7667                                                                 del reinst_flags_map[k]
7668                                                 if not reinst_flags_map.get("USE"):
7669                                                         reinst_expand_map = reinst_flags_map.copy()
7670                                                         reinst_expand_map.pop("USE", None)
7671                                         if reinst_expand_map and \
7672                                                 not set(reinst_expand_map).difference(
7673                                                 use_expand_hidden):
7674                                                 use_expand_hidden = \
7675                                                         set(use_expand_hidden).difference(
7676                                                         reinst_expand_map)
7677
7678                                         cur_iuse_map, iuse_forced = \
7679                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
7680                                         cur_use_map = map_to_use_expand(cur_use)
7681                                         old_iuse_map = map_to_use_expand(old_iuse)
7682                                         old_use_map = map_to_use_expand(old_use)
7683
7684                                         use_expand.sort()
7685                                         use_expand.insert(0, "USE")
7686                                         
7687                                         for key in use_expand:
7688                                                 if key in use_expand_hidden:
7689                                                         continue
7690                                                 verboseadd += create_use_string(key.upper(),
7691                                                         cur_iuse_map[key], iuse_forced[key],
7692                                                         cur_use_map[key], old_iuse_map[key],
7693                                                         old_use_map[key], is_new,
7694                                                         reinst_flags_map.get(key))
7695
7696                                 if verbosity == 3:
7697                                         # size verbose
7698                                         mysize=0
7699                                         if pkg_type == "ebuild" and pkg_merge:
7700                                                 try:
7701                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
7702                                                                 useflags=pkg_use, debug=self.edebug)
7703                                                 except portage.exception.InvalidDependString, e:
7704                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7705                                                         show_invalid_depstring_notice(x, src_uri, str(e))
7706                                                         del e
7707                                                         return 1
7708                                                 if myfilesdict is None:
7709                                                         myfilesdict="[empty/missing/bad digest]"
7710                                                 else:
7711                                                         for myfetchfile in myfilesdict:
7712                                                                 if myfetchfile not in myfetchlist:
7713                                                                         mysize+=myfilesdict[myfetchfile]
7714                                                                         myfetchlist.append(myfetchfile)
7715                                                         if ordered:
7716                                                                 counters.totalsize += mysize
7717                                                 verboseadd += format_size(mysize)
7718
7719                                         # overlay verbose
7720                                         # assign index for a previous version in the same slot
7721                                         has_previous = False
7722                                         repo_name_prev = None
7723                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7724                                                 metadata["SLOT"])
7725                                         slot_matches = vardb.match(slot_atom)
7726                                         if slot_matches:
7727                                                 has_previous = True
7728                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
7729                                                         ["repository"])[0]
7730
7731                                         # now use the data to generate output
7732                                         if pkg.installed or not has_previous:
7733                                                 repoadd = repo_display.repoStr(repo_path_real)
7734                                         else:
7735                                                 repo_path_prev = None
7736                                                 if repo_name_prev:
7737                                                         repo_path_prev = portdb.getRepositoryPath(
7738                                                                 repo_name_prev)
7739                                                 if repo_path_prev == repo_path_real:
7740                                                         repoadd = repo_display.repoStr(repo_path_real)
7741                                                 else:
7742                                                         repoadd = "%s=>%s" % (
7743                                                                 repo_display.repoStr(repo_path_prev),
7744                                                                 repo_display.repoStr(repo_path_real))
7745                                         if repoadd:
7746                                                 repoadd_set.add(repoadd)
7747
7748                                 xs = [portage.cpv_getkey(pkg_key)] + \
7749                                         list(portage.catpkgsplit(pkg_key)[2:])
7750                                 if xs[2] == "r0":
7751                                         xs[2] = ""
7752                                 else:
7753                                         xs[2] = "-" + xs[2]
7754
7755                                 mywidth = 130
7756                                 if "COLUMNWIDTH" in self.settings:
7757                                         try:
7758                                                 mywidth = int(self.settings["COLUMNWIDTH"])
7759                                         except ValueError, e:
7760                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7761                                                 portage.writemsg(
7762                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7763                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
7764                                                 del e
7765                                 oldlp = mywidth - 30
7766                                 newlp = oldlp - 30
7767
7768                                 # Convert myoldbest from a list to a string.
7769                                 if not myoldbest:
7770                                         myoldbest = ""
7771                                 else:
7772                                         for pos, key in enumerate(myoldbest):
7773                                                 key = portage.catpkgsplit(key)[2] + \
7774                                                         "-" + portage.catpkgsplit(key)[3]
7775                                                 if key[-3:] == "-r0":
7776                                                         key = key[:-3]
7777                                                 myoldbest[pos] = key
7778                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
7779
7780                                 pkg_cp = xs[0]
7781                                 root_config = self.roots[myroot]
7782                                 system_set = root_config.sets["system"]
7783                                 world_set  = root_config.sets["world"]
7784
7785                                 pkg_system = False
7786                                 pkg_world = False
7787                                 try:
7788                                         pkg_system = system_set.findAtomForPackage(pkg)
7789                                         pkg_world  = world_set.findAtomForPackage(pkg)
7790                                         if not (oneshot or pkg_world) and \
7791                                                 myroot == self.target_root and \
7792                                                 favorites_set.findAtomForPackage(pkg):
7793                                                 # Maybe it will be added to world now.
7794                                                 if create_world_atom(pkg, favorites_set, root_config):
7795                                                         pkg_world = True
7796                                 except portage.exception.InvalidDependString:
7797                                         # This is reported elsewhere if relevant.
7798                                         pass
7799
7800                                 def pkgprint(pkg_str):
7801                                         if pkg_merge:
7802                                                 if pkg_system:
7803                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
7804                                                 elif pkg_world:
7805                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
7806                                                 else:
7807                                                         return colorize("PKG_MERGE", pkg_str)
7808                                         elif pkg_status == "uninstall":
7809                                                 return colorize("PKG_UNINSTALL", pkg_str)
7810                                         else:
7811                                                 if pkg_system:
7812                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7813                                                 elif pkg_world:
7814                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
7815                                                 else:
7816                                                         return colorize("PKG_NOMERGE", pkg_str)
7817
7818                                 try:
7819                                         properties = flatten(use_reduce(paren_reduce(
7820                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7821                                 except portage.exception.InvalidDependString, e:
7822                                         if not pkg.installed:
7823                                                 show_invalid_depstring_notice(pkg,
7824                                                         pkg.metadata["PROPERTIES"], str(e))
7825                                                 del e
7826                                                 return 1
7827                                         properties = []
7828                                 interactive = "interactive" in properties
7829                                 if interactive and pkg.operation == "merge":
7830                                         addl = colorize("WARN", "I") + addl[1:]
7831                                         if ordered:
7832                                                 counters.interactive += 1
7833
7834                                 if x[1]!="/":
7835                                         if myoldbest:
7836                                                 myoldbest +=" "
7837                                         if "--columns" in self.myopts:
7838                                                 if "--quiet" in self.myopts:
7839                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7840                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7841                                                         myprint=myprint+myoldbest
7842                                                         myprint=myprint+darkgreen("to "+x[1])
7843                                                         verboseadd = None
7844                                                 else:
7845                                                         if not pkg_merge:
7846                                                                 myprint = "[%s] %s%s" % \
7847                                                                         (pkgprint(pkg_status.ljust(13)),
7848                                                                         indent, pkgprint(pkg.cp))
7849                                                         else:
7850                                                                 myprint = "[%s %s] %s%s" % \
7851                                                                         (pkgprint(pkg.type_name), addl,
7852                                                                         indent, pkgprint(pkg.cp))
7853                                                         if (newlp-nc_len(myprint)) > 0:
7854                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7855                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7856                                                         if (oldlp-nc_len(myprint)) > 0:
7857                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
7858                                                         myprint=myprint+myoldbest
7859                                                         myprint += darkgreen("to " + pkg.root)
7860                                         else:
7861                                                 if not pkg_merge:
7862                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7863                                                 else:
7864                                                         myprint = "[" + pkg_type + " " + addl + "] "
7865                                                 myprint += indent + pkgprint(pkg_key) + " " + \
7866                                                         myoldbest + darkgreen("to " + myroot)
7867                                 else:
7868                                         if "--columns" in self.myopts:
7869                                                 if "--quiet" in self.myopts:
7870                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7871                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
7872                                                         myprint=myprint+myoldbest
7873                                                         verboseadd = None
7874                                                 else:
7875                                                         if not pkg_merge:
7876                                                                 myprint = "[%s] %s%s" % \
7877                                                                         (pkgprint(pkg_status.ljust(13)),
7878                                                                         indent, pkgprint(pkg.cp))
7879                                                         else:
7880                                                                 myprint = "[%s %s] %s%s" % \
7881                                                                         (pkgprint(pkg.type_name), addl,
7882                                                                         indent, pkgprint(pkg.cp))
7883                                                         if (newlp-nc_len(myprint)) > 0:
7884                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7885                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7886                                                         if (oldlp-nc_len(myprint)) > 0:
7887                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7888                                                         myprint += myoldbest
7889                                         else:
7890                                                 if not pkg_merge:
7891                                                         myprint = "[%s] %s%s %s" % \
7892                                                                 (pkgprint(pkg_status.ljust(13)),
7893                                                                 indent, pkgprint(pkg.cpv),
7894                                                                 myoldbest)
7895                                                 else:
7896                                                         myprint = "[%s %s] %s%s %s" % \
7897                                                                 (pkgprint(pkg_type), addl, indent,
7898                                                                 pkgprint(pkg.cpv), myoldbest)
7899
7900                                 if columns and pkg.operation == "uninstall":
7901                                         continue
7902                                 p.append((myprint, verboseadd, repoadd))
7903
7904                                 if "--tree" not in self.myopts and \
7905                                         "--quiet" not in self.myopts and \
7906                                         not self._opts_no_restart.intersection(self.myopts) and \
7907                                         pkg.root == self._running_root.root and \
7908                                         portage.match_from_list(
7909                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7910                                         not vardb.cpv_exists(pkg.cpv) and \
7911                                         "--quiet" not in self.myopts:
7912                                                 if mylist_index < len(mylist) - 1:
7913                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7914                                                         p.append(colorize("WARN", "    then resume the merge."))
7915
7916                 out = sys.stdout
7917                 show_repos = repoadd_set and repoadd_set != set(["0"])
7918
7919                 for x in p:
7920                         if isinstance(x, basestring):
7921                                 out.write("%s\n" % (x,))
7922                                 continue
7923
7924                         myprint, verboseadd, repoadd = x
7925
7926                         if verboseadd:
7927                                 myprint += " " + verboseadd
7928
7929                         if show_repos and repoadd:
7930                                 myprint += " " + teal("[%s]" % repoadd)
7931
7932                         out.write("%s\n" % (myprint,))
7933
7934                 for x in blockers:
7935                         print x
7936
7937                 if verbosity == 3:
7938                         print
7939                         print counters
7940                         if show_repos:
7941                                 sys.stdout.write(str(repo_display))
7942
7943                 if "--changelog" in self.myopts:
7944                         print
7945                         for revision,text in changelogs:
7946                                 print bold('*'+revision)
7947                                 sys.stdout.write(text)
7948
7949                 sys.stdout.flush()
7950                 return os.EX_OK
7951
7952         def display_problems(self):
7953                 """
7954                 Display problems with the dependency graph such as slot collisions.
7955                 This is called internally by display() to show the problems _after_
7956                 the merge list where it is most likely to be seen, but if display()
7957                 is not going to be called then this method should be called explicitly
7958                 to ensure that the user is notified of problems with the graph.
7959
7960                 All output goes to stderr, except for unsatisfied dependencies which
7961                 go to stdout for parsing by programs such as autounmask.
7962                 """
7963
7964                 # Note that show_masked_packages() sends it's output to
7965                 # stdout, and some programs such as autounmask parse the
7966                 # output in cases when emerge bails out. However, when
7967                 # show_masked_packages() is called for installed packages
7968                 # here, the message is a warning that is more appropriate
7969                 # to send to stderr, so temporarily redirect stdout to
7970                 # stderr. TODO: Fix output code so there's a cleaner way
7971                 # to redirect everything to stderr.
7972                 sys.stdout.flush()
7973                 sys.stderr.flush()
7974                 stdout = sys.stdout
7975                 try:
7976                         sys.stdout = sys.stderr
7977                         self._display_problems()
7978                 finally:
7979                         sys.stdout = stdout
7980                         sys.stdout.flush()
7981                         sys.stderr.flush()
7982
7983                 # This goes to stdout for parsing by programs like autounmask.
7984                 for pargs, kwargs in self._unsatisfied_deps_for_display:
7985                         self._show_unsatisfied_dep(*pargs, **kwargs)
7986
7987         def _display_problems(self):
7988                 if self._circular_deps_for_display is not None:
7989                         self._show_circular_deps(
7990                                 self._circular_deps_for_display)
7991
7992                 # The user is only notified of a slot conflict if
7993                 # there are no unresolvable blocker conflicts.
7994                 if self._unsatisfied_blockers_for_display is not None:
7995                         self._show_unsatisfied_blockers(
7996                                 self._unsatisfied_blockers_for_display)
7997                 else:
7998                         self._show_slot_collision_notice()
7999
8000                 # TODO: Add generic support for "set problem" handlers so that
8001                 # the below warnings aren't special cases for world only.
8002
8003                 if self._missing_args:
8004                         world_problems = False
8005                         if "world" in self._sets:
8006                                 # Filter out indirect members of world (from nested sets)
8007                                 # since only direct members of world are desired here.
8008                                 world_set = self.roots[self.target_root].sets["world"]
8009                                 for arg, atom in self._missing_args:
8010                                         if arg.name == "world" and atom in world_set:
8011                                                 world_problems = True
8012                                                 break
8013
8014                         if world_problems:
8015                                 sys.stderr.write("\n!!! Problems have been " + \
8016                                         "detected with your world file\n")
8017                                 sys.stderr.write("!!! Please run " + \
8018                                         green("emaint --check world")+"\n\n")
8019
8020                 if self._missing_args:
8021                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8022                                 " Ebuilds for the following packages are either all\n")
8023                         sys.stderr.write(colorize("BAD", "!!!") + \
8024                                 " masked or don't exist:\n")
8025                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8026                                 self._missing_args) + "\n")
8027
8028                 if self._pprovided_args:
8029                         arg_refs = {}
8030                         for arg, atom in self._pprovided_args:
8031                                 if isinstance(arg, SetArg):
8032                                         parent = arg.name
8033                                         arg_atom = (atom, atom)
8034                                 else:
8035                                         parent = "args"
8036                                         arg_atom = (arg.arg, atom)
8037                                 refs = arg_refs.setdefault(arg_atom, [])
8038                                 if parent not in refs:
8039                                         refs.append(parent)
8040                         msg = []
8041                         msg.append(bad("\nWARNING: "))
8042                         if len(self._pprovided_args) > 1:
8043                                 msg.append("Requested packages will not be " + \
8044                                         "merged because they are listed in\n")
8045                         else:
8046                                 msg.append("A requested package will not be " + \
8047                                         "merged because it is listed in\n")
8048                         msg.append("package.provided:\n\n")
8049                         problems_sets = set()
8050                         for (arg, atom), refs in arg_refs.iteritems():
8051                                 ref_string = ""
8052                                 if refs:
8053                                         problems_sets.update(refs)
8054                                         refs.sort()
8055                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8056                                         ref_string = " pulled in by " + ref_string
8057                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8058                         msg.append("\n")
8059                         if "world" in problems_sets:
8060                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8061                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8062                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8063                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8064                                 msg.append("The best course of action depends on the reason that an offending\n")
8065                                 msg.append("package.provided entry exists.\n\n")
8066                         sys.stderr.write("".join(msg))
8067
8068                 masked_packages = []
8069                 for pkg in self._masked_installed:
8070                         root_config = pkg.root_config
8071                         pkgsettings = self.pkgsettings[pkg.root]
8072                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8073                         masked_packages.append((root_config, pkgsettings,
8074                                 pkg.cpv, pkg.metadata, mreasons))
8075                 if masked_packages:
8076                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8077                                 " The following installed packages are masked:\n")
8078                         show_masked_packages(masked_packages)
8079                         show_mask_docs()
8080                         print
8081
8082         def calc_changelog(self,ebuildpath,current,next):
8083                 if ebuildpath == None or not os.path.exists(ebuildpath):
8084                         return []
8085                 current = '-'.join(portage.catpkgsplit(current)[1:])
8086                 if current.endswith('-r0'):
8087                         current = current[:-3]
8088                 next = '-'.join(portage.catpkgsplit(next)[1:])
8089                 if next.endswith('-r0'):
8090                         next = next[:-3]
8091                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8092                 try:
8093                         changelog = open(changelogpath).read()
8094                 except SystemExit, e:
8095                         raise # Needed else can't exit
8096                 except:
8097                         return []
8098                 divisions = self.find_changelog_tags(changelog)
8099                 #print 'XX from',current,'to',next
8100                 #for div,text in divisions: print 'XX',div
8101                 # skip entries for all revisions above the one we are about to emerge
8102                 for i in range(len(divisions)):
8103                         if divisions[i][0]==next:
8104                                 divisions = divisions[i:]
8105                                 break
8106                 # find out how many entries we are going to display
8107                 for i in range(len(divisions)):
8108                         if divisions[i][0]==current:
8109                                 divisions = divisions[:i]
8110                                 break
8111                 else:
8112                     # couldnt find the current revision in the list. display nothing
8113                         return []
8114                 return divisions
8115
8116         def find_changelog_tags(self,changelog):
8117                 divs = []
8118                 release = None
8119                 while 1:
8120                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8121                         if match is None:
8122                                 if release is not None:
8123                                         divs.append((release,changelog))
8124                                 return divs
8125                         if release is not None:
8126                                 divs.append((release,changelog[:match.start()]))
8127                         changelog = changelog[match.end():]
8128                         release = match.group(1)
8129                         if release.endswith('.ebuild'):
8130                                 release = release[:-7]
8131                         if release.endswith('-r0'):
8132                                 release = release[:-3]
8133
8134         def saveNomergeFavorites(self):
8135                 """Find atoms in favorites that are not in the mergelist and add them
8136                 to the world file if necessary."""
8137                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8138                         "--oneshot", "--onlydeps", "--pretend"):
8139                         if x in self.myopts:
8140                                 return
8141                 root_config = self.roots[self.target_root]
8142                 world_set = root_config.sets["world"]
8143
8144                 world_locked = False
8145                 if hasattr(world_set, "lock"):
8146                         world_set.lock()
8147                         world_locked = True
8148
8149                 if hasattr(world_set, "load"):
8150                         world_set.load() # maybe it's changed on disk
8151
8152                 args_set = self._sets["args"]
8153                 portdb = self.trees[self.target_root]["porttree"].dbapi
8154                 added_favorites = set()
8155                 for x in self._set_nodes:
8156                         pkg_type, root, pkg_key, pkg_status = x
8157                         if pkg_status != "nomerge":
8158                                 continue
8159
8160                         try:
8161                                 myfavkey = create_world_atom(x, args_set, root_config)
8162                                 if myfavkey:
8163                                         if myfavkey in added_favorites:
8164                                                 continue
8165                                         added_favorites.add(myfavkey)
8166                         except portage.exception.InvalidDependString, e:
8167                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8168                                         (pkg_key, str(e)), noiselevel=-1)
8169                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8170                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8171                                 del e
8172                 all_added = []
8173                 for k in self._sets:
8174                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8175                                 continue
8176                         s = SETPREFIX + k
8177                         if s in world_set:
8178                                 continue
8179                         all_added.append(SETPREFIX + k)
8180                 all_added.extend(added_favorites)
8181                 all_added.sort()
8182                 for a in all_added:
8183                         print ">>> Recording %s in \"world\" favorites file..." % \
8184                                 colorize("INFORM", str(a))
8185                 if all_added:
8186                         world_set.update(all_added)
8187
8188                 if world_locked:
8189                         world_set.unlock()
8190
8191         def loadResumeCommand(self, resume_data, skip_masked=False):
8192                 """
8193                 Add a resume command to the graph and validate it in the process.  This
8194                 will raise a PackageNotFound exception if a package is not available.
8195                 """
8196
8197                 if not isinstance(resume_data, dict):
8198                         return False
8199
8200                 mergelist = resume_data.get("mergelist")
8201                 if not isinstance(mergelist, list):
8202                         mergelist = []
8203
8204                 fakedb = self.mydbapi
8205                 trees = self.trees
8206                 serialized_tasks = []
8207                 masked_tasks = []
8208                 for x in mergelist:
8209                         if not (isinstance(x, list) and len(x) == 4):
8210                                 continue
8211                         pkg_type, myroot, pkg_key, action = x
8212                         if pkg_type not in self.pkg_tree_map:
8213                                 continue
8214                         if action != "merge":
8215                                 continue
8216                         tree_type = self.pkg_tree_map[pkg_type]
8217                         mydb = trees[myroot][tree_type].dbapi
8218                         db_keys = list(self._trees_orig[myroot][
8219                                 tree_type].dbapi._aux_cache_keys)
8220                         try:
8221                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8222                         except KeyError:
8223                                 # It does no exist or it is corrupt.
8224                                 if action == "uninstall":
8225                                         continue
8226                                 raise portage.exception.PackageNotFound(pkg_key)
8227                         installed = action == "uninstall"
8228                         built = pkg_type != "ebuild"
8229                         root_config = self.roots[myroot]
8230                         pkg = Package(built=built, cpv=pkg_key,
8231                                 installed=installed, metadata=metadata,
8232                                 operation=action, root_config=root_config,
8233                                 type_name=pkg_type)
8234                         if pkg_type == "ebuild":
8235                                 pkgsettings = self.pkgsettings[myroot]
8236                                 pkgsettings.setcpv(pkg)
8237                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8238                         self._pkg_cache[pkg] = pkg
8239
8240                         root_config = self.roots[pkg.root]
8241                         if "merge" == pkg.operation and \
8242                                 not visible(root_config.settings, pkg):
8243                                 if skip_masked:
8244                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8245                                 else:
8246                                         self._unsatisfied_deps_for_display.append(
8247                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8248
8249                         fakedb[myroot].cpv_inject(pkg)
8250                         serialized_tasks.append(pkg)
8251                         self.spinner.update()
8252
8253                 if self._unsatisfied_deps_for_display:
8254                         return False
8255
8256                 if not serialized_tasks or "--nodeps" in self.myopts:
8257                         self._serialized_tasks_cache = serialized_tasks
8258                         self._scheduler_graph = self.digraph
8259                 else:
8260                         self._select_package = self._select_pkg_from_graph
8261                         self.myparams.add("selective")
8262
8263                         favorites = resume_data.get("favorites")
8264                         args_set = self._sets["args"]
8265                         if isinstance(favorites, list):
8266                                 args = self._load_favorites(favorites)
8267                         else:
8268                                 args = []
8269
8270                         for task in serialized_tasks:
8271                                 if isinstance(task, Package) and \
8272                                         task.operation == "merge":
8273                                         if not self._add_pkg(task, None):
8274                                                 return False
8275
8276                         # Packages for argument atoms need to be explicitly
8277                         # added via _add_pkg() so that they are included in the
8278                         # digraph (needed at least for --tree display).
8279                         for arg in args:
8280                                 for atom in arg.set:
8281                                         pkg, existing_node = self._select_package(
8282                                                 arg.root_config.root, atom)
8283                                         if existing_node is None and \
8284                                                 pkg is not None:
8285                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8286                                                         root=pkg.root, parent=arg)):
8287                                                         return False
8288
8289                         # Allow unsatisfied deps here to avoid showing a masking
8290                         # message for an unsatisfied dep that isn't necessarily
8291                         # masked.
8292                         if not self._create_graph(allow_unsatisfied=True):
8293                                 return False
8294                         if masked_tasks or self._unsatisfied_deps:
8295                                 # This probably means that a required package
8296                                 # was dropped via --skipfirst. It makes the
8297                                 # resume list invalid, so convert it to a
8298                                 # UnsatisfiedResumeDep exception.
8299                                 raise self.UnsatisfiedResumeDep(self,
8300                                         masked_tasks + self._unsatisfied_deps)
8301                         self._serialized_tasks_cache = None
8302                         try:
8303                                 self.altlist()
8304                         except self._unknown_internal_error:
8305                                 return False
8306
8307                 return True
8308
8309         def _load_favorites(self, favorites):
8310                 """
8311                 Use a list of favorites to resume state from a
8312                 previous select_files() call. This creates similar
8313                 DependencyArg instances to those that would have
8314                 been created by the original select_files() call.
8315                 This allows Package instances to be matched with
8316                 DependencyArg instances during graph creation.
8317                 """
8318                 root_config = self.roots[self.target_root]
8319                 getSetAtoms = root_config.setconfig.getSetAtoms
8320                 sets = root_config.sets
8321                 args = []
8322                 for x in favorites:
8323                         if not isinstance(x, basestring):
8324                                 continue
8325                         if x in ("system", "world"):
8326                                 x = SETPREFIX + x
8327                         if x.startswith(SETPREFIX):
8328                                 s = x[len(SETPREFIX):]
8329                                 if s not in sets:
8330                                         continue
8331                                 if s in self._sets:
8332                                         continue
8333                                 # Recursively expand sets so that containment tests in
8334                                 # self._get_parent_sets() properly match atoms in nested
8335                                 # sets (like if world contains system).
8336                                 expanded_set = InternalPackageSet(
8337                                         initial_atoms=getSetAtoms(s))
8338                                 self._sets[s] = expanded_set
8339                                 args.append(SetArg(arg=x, set=expanded_set,
8340                                         root_config=root_config))
8341                         else:
8342                                 if not portage.isvalidatom(x):
8343                                         continue
8344                                 args.append(AtomArg(arg=x, atom=x,
8345                                         root_config=root_config))
8346
8347                 # Create the "args" package set from atoms and
8348                 # packages given as arguments.
8349                 args_set = self._sets["args"]
8350                 for arg in args:
8351                         if not isinstance(arg, (AtomArg, PackageArg)):
8352                                 continue
8353                         myatom = arg.atom
8354                         if myatom in args_set:
8355                                 continue
8356                         args_set.add(myatom)
8357                 self._set_atoms.update(chain(*self._sets.itervalues()))
8358                 atom_arg_map = self._atom_arg_map
8359                 for arg in args:
8360                         for atom in arg.set:
8361                                 atom_key = (atom, arg.root_config.root)
8362                                 refs = atom_arg_map.get(atom_key)
8363                                 if refs is None:
8364                                         refs = []
8365                                         atom_arg_map[atom_key] = refs
8366                                         if arg not in refs:
8367                                                 refs.append(arg)
8368                 return args
8369
8370         class UnsatisfiedResumeDep(portage.exception.PortageException):
8371                 """
8372                 A dependency of a resume list is not installed. This
8373                 can occur when a required package is dropped from the
8374                 merge list via --skipfirst.
8375                 """
8376                 def __init__(self, depgraph, value):
8377                         portage.exception.PortageException.__init__(self, value)
8378                         self.depgraph = depgraph
8379
8380         class _internal_exception(portage.exception.PortageException):
8381                 def __init__(self, value=""):
8382                         portage.exception.PortageException.__init__(self, value)
8383
8384         class _unknown_internal_error(_internal_exception):
8385                 """
8386                 Used by the depgraph internally to terminate graph creation.
8387                 The specific reason for the failure should have been dumped
8388                 to stderr, unfortunately, the exact reason for the failure
8389                 may not be known.
8390                 """
8391
8392         class _serialize_tasks_retry(_internal_exception):
8393                 """
8394                 This is raised by the _serialize_tasks() method when it needs to
8395                 be called again for some reason. The only case that it's currently
8396                 used for is when neglected dependencies need to be added to the
8397                 graph in order to avoid making a potentially unsafe decision.
8398                 """
8399
8400         class _dep_check_composite_db(portage.dbapi):
8401                 """
8402                 A dbapi-like interface that is optimized for use in dep_check() calls.
8403                 This is built on top of the existing depgraph package selection logic.
8404                 Some packages that have been added to the graph may be masked from this
8405                 view in order to influence the atom preference selection that occurs
8406                 via dep_check().
8407                 """
8408                 def __init__(self, depgraph, root):
8409                         portage.dbapi.__init__(self)
8410                         self._depgraph = depgraph
8411                         self._root = root
8412                         self._match_cache = {}
8413                         self._cpv_pkg_map = {}
8414
8415                 def match(self, atom):
8416                         ret = self._match_cache.get(atom)
8417                         if ret is not None:
8418                                 return ret[:]
8419                         orig_atom = atom
8420                         if "/" not in atom:
8421                                 atom = self._dep_expand(atom)
8422                         pkg, existing = self._depgraph._select_package(self._root, atom)
8423                         if not pkg:
8424                                 ret = []
8425                         else:
8426                                 # Return the highest available from select_package() as well as
8427                                 # any matching slots in the graph db.
8428                                 slots = set()
8429                                 slots.add(pkg.metadata["SLOT"])
8430                                 atom_cp = portage.dep_getkey(atom)
8431                                 if pkg.cp.startswith("virtual/"):
8432                                         # For new-style virtual lookahead that occurs inside
8433                                         # dep_check(), examine all slots. This is needed
8434                                         # so that newer slots will not unnecessarily be pulled in
8435                                         # when a satisfying lower slot is already installed. For
8436                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8437                                         # there's no need to pull in a newer slot to satisfy a
8438                                         # virtual/jdk dependency.
8439                                         for db, pkg_type, built, installed, db_keys in \
8440                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8441                                                 for cpv in db.match(atom):
8442                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8443                                                                 continue
8444                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8445                                 ret = []
8446                                 if self._visible(pkg):
8447                                         self._cpv_pkg_map[pkg.cpv] = pkg
8448                                         ret.append(pkg.cpv)
8449                                 slots.remove(pkg.metadata["SLOT"])
8450                                 while slots:
8451                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8452                                         pkg, existing = self._depgraph._select_package(
8453                                                 self._root, slot_atom)
8454                                         if not pkg:
8455                                                 continue
8456                                         if not self._visible(pkg):
8457                                                 continue
8458                                         self._cpv_pkg_map[pkg.cpv] = pkg
8459                                         ret.append(pkg.cpv)
8460                                 if ret:
8461                                         self._cpv_sort_ascending(ret)
8462                         self._match_cache[orig_atom] = ret
8463                         return ret[:]
8464
8465                 def _visible(self, pkg):
8466                         if pkg.installed and "selective" not in self._depgraph.myparams:
8467                                 try:
8468                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8469                                 except (StopIteration, portage.exception.InvalidDependString):
8470                                         arg = None
8471                                 if arg:
8472                                         return False
8473                         if pkg.installed:
8474                                 try:
8475                                         if not visible(
8476                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8477                                                 return False
8478                                 except portage.exception.InvalidDependString:
8479                                         pass
8480                         return True
8481
8482                 def _dep_expand(self, atom):
8483                         """
8484                         This is only needed for old installed packages that may
8485                         contain atoms that are not fully qualified with a specific
8486                         category. Emulate the cpv_expand() function that's used by
8487                         dbapi.match() in cases like this. If there are multiple
8488                         matches, it's often due to a new-style virtual that has
8489                         been added, so try to filter those out to avoid raising
8490                         a ValueError.
8491                         """
8492                         root_config = self._depgraph.roots[self._root]
8493                         orig_atom = atom
8494                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8495                         if len(expanded_atoms) > 1:
8496                                 non_virtual_atoms = []
8497                                 for x in expanded_atoms:
8498                                         if not portage.dep_getkey(x).startswith("virtual/"):
8499                                                 non_virtual_atoms.append(x)
8500                                 if len(non_virtual_atoms) == 1:
8501                                         expanded_atoms = non_virtual_atoms
8502                         if len(expanded_atoms) > 1:
8503                                 # compatible with portage.cpv_expand()
8504                                 raise portage.exception.AmbiguousPackageName(
8505                                         [portage.dep_getkey(x) for x in expanded_atoms])
8506                         if expanded_atoms:
8507                                 atom = expanded_atoms[0]
8508                         else:
8509                                 null_atom = insert_category_into_atom(atom, "null")
8510                                 null_cp = portage.dep_getkey(null_atom)
8511                                 cat, atom_pn = portage.catsplit(null_cp)
8512                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8513                                 if virts_p:
8514                                         # Allow the resolver to choose which virtual.
8515                                         atom = insert_category_into_atom(atom, "virtual")
8516                                 else:
8517                                         atom = insert_category_into_atom(atom, "null")
8518                         return atom
8519
8520                 def aux_get(self, cpv, wants):
8521                         metadata = self._cpv_pkg_map[cpv].metadata
8522                         return [metadata.get(x, "") for x in wants]
8523
8524 class RepoDisplay(object):
8525         def __init__(self, roots):
8526                 self._shown_repos = {}
8527                 self._unknown_repo = False
8528                 repo_paths = set()
8529                 for root_config in roots.itervalues():
8530                         portdir = root_config.settings.get("PORTDIR")
8531                         if portdir:
8532                                 repo_paths.add(portdir)
8533                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
8534                         if overlays:
8535                                 repo_paths.update(overlays.split())
8536                 repo_paths = list(repo_paths)
8537                 self._repo_paths = repo_paths
8538                 self._repo_paths_real = [ os.path.realpath(repo_path) \
8539                         for repo_path in repo_paths ]
8540
8541                 # pre-allocate index for PORTDIR so that it always has index 0.
8542                 for root_config in roots.itervalues():
8543                         portdb = root_config.trees["porttree"].dbapi
8544                         portdir = portdb.porttree_root
8545                         if portdir:
8546                                 self.repoStr(portdir)
8547
8548         def repoStr(self, repo_path_real):
8549                 real_index = -1
8550                 if repo_path_real:
8551                         real_index = self._repo_paths_real.index(repo_path_real)
8552                 if real_index == -1:
8553                         s = "?"
8554                         self._unknown_repo = True
8555                 else:
8556                         shown_repos = self._shown_repos
8557                         repo_paths = self._repo_paths
8558                         repo_path = repo_paths[real_index]
8559                         index = shown_repos.get(repo_path)
8560                         if index is None:
8561                                 index = len(shown_repos)
8562                                 shown_repos[repo_path] = index
8563                         s = str(index)
8564                 return s
8565
8566         def __str__(self):
8567                 output = []
8568                 shown_repos = self._shown_repos
8569                 unknown_repo = self._unknown_repo
8570                 if shown_repos or self._unknown_repo:
8571                         output.append("Portage tree and overlays:\n")
8572                 show_repo_paths = list(shown_repos)
8573                 for repo_path, repo_index in shown_repos.iteritems():
8574                         show_repo_paths[repo_index] = repo_path
8575                 if show_repo_paths:
8576                         for index, repo_path in enumerate(show_repo_paths):
8577                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8578                 if unknown_repo:
8579                         output.append(" "+teal("[?]") + \
8580                                 " indicates that the source repository could not be determined\n")
8581                 return "".join(output)
8582
8583 class PackageCounters(object):
8584
8585         def __init__(self):
8586                 self.upgrades   = 0
8587                 self.downgrades = 0
8588                 self.new        = 0
8589                 self.newslot    = 0
8590                 self.reinst     = 0
8591                 self.uninst     = 0
8592                 self.blocks     = 0
8593                 self.blocks_satisfied         = 0
8594                 self.totalsize  = 0
8595                 self.restrict_fetch           = 0
8596                 self.restrict_fetch_satisfied = 0
8597                 self.interactive              = 0
8598
8599         def __str__(self):
8600                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8601                 myoutput = []
8602                 details = []
8603                 myoutput.append("Total: %s package" % total_installs)
8604                 if total_installs != 1:
8605                         myoutput.append("s")
8606                 if total_installs != 0:
8607                         myoutput.append(" (")
8608                 if self.upgrades > 0:
8609                         details.append("%s upgrade" % self.upgrades)
8610                         if self.upgrades > 1:
8611                                 details[-1] += "s"
8612                 if self.downgrades > 0:
8613                         details.append("%s downgrade" % self.downgrades)
8614                         if self.downgrades > 1:
8615                                 details[-1] += "s"
8616                 if self.new > 0:
8617                         details.append("%s new" % self.new)
8618                 if self.newslot > 0:
8619                         details.append("%s in new slot" % self.newslot)
8620                         if self.newslot > 1:
8621                                 details[-1] += "s"
8622                 if self.reinst > 0:
8623                         details.append("%s reinstall" % self.reinst)
8624                         if self.reinst > 1:
8625                                 details[-1] += "s"
8626                 if self.uninst > 0:
8627                         details.append("%s uninstall" % self.uninst)
8628                         if self.uninst > 1:
8629                                 details[-1] += "s"
8630                 if self.interactive > 0:
8631                         details.append("%s %s" % (self.interactive,
8632                                 colorize("WARN", "interactive")))
8633                 myoutput.append(", ".join(details))
8634                 if total_installs != 0:
8635                         myoutput.append(")")
8636                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8637                 if self.restrict_fetch:
8638                         myoutput.append("\nFetch Restriction: %s package" % \
8639                                 self.restrict_fetch)
8640                         if self.restrict_fetch > 1:
8641                                 myoutput.append("s")
8642                 if self.restrict_fetch_satisfied < self.restrict_fetch:
8643                         myoutput.append(bad(" (%s unsatisfied)") % \
8644                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
8645                 if self.blocks > 0:
8646                         myoutput.append("\nConflict: %s block" % \
8647                                 self.blocks)
8648                         if self.blocks > 1:
8649                                 myoutput.append("s")
8650                         if self.blocks_satisfied < self.blocks:
8651                                 myoutput.append(bad(" (%s unsatisfied)") % \
8652                                         (self.blocks - self.blocks_satisfied))
8653                 return "".join(myoutput)
8654
8655 class PollSelectAdapter(PollConstants):
8656
8657         """
8658         Use select to emulate a poll object, for
8659         systems that don't support poll().
8660         """
8661
8662         def __init__(self):
8663                 self._registered = {}
8664                 self._select_args = [[], [], []]
8665
8666         def register(self, fd, *args):
8667                 """
8668                 Only POLLIN is currently supported!
8669                 """
8670                 if len(args) > 1:
8671                         raise TypeError(
8672                                 "register expected at most 2 arguments, got " + \
8673                                 repr(1 + len(args)))
8674
8675                 eventmask = PollConstants.POLLIN | \
8676                         PollConstants.POLLPRI | PollConstants.POLLOUT
8677                 if args:
8678                         eventmask = args[0]
8679
8680                 self._registered[fd] = eventmask
8681                 self._select_args = None
8682
8683         def unregister(self, fd):
8684                 self._select_args = None
8685                 del self._registered[fd]
8686
8687         def poll(self, *args):
8688                 if len(args) > 1:
8689                         raise TypeError(
8690                                 "poll expected at most 2 arguments, got " + \
8691                                 repr(1 + len(args)))
8692
8693                 timeout = None
8694                 if args:
8695                         timeout = args[0]
8696
8697                 select_args = self._select_args
8698                 if select_args is None:
8699                         select_args = [self._registered.keys(), [], []]
8700
8701                 if timeout is not None:
8702                         select_args = select_args[:]
8703                         # Translate poll() timeout args to select() timeout args:
8704                         #
8705                         #          | units        | value(s) for indefinite block
8706                         # ---------|--------------|------------------------------
8707                         #   poll   | milliseconds | omitted, negative, or None
8708                         # ---------|--------------|------------------------------
8709                         #   select | seconds      | omitted
8710                         # ---------|--------------|------------------------------
8711
8712                         if timeout is not None and timeout < 0:
8713                                 timeout = None
8714                         if timeout is not None:
8715                                 select_args.append(timeout / 1000)
8716
8717                 select_events = select.select(*select_args)
8718                 poll_events = []
8719                 for fd in select_events[0]:
8720                         poll_events.append((fd, PollConstants.POLLIN))
8721                 return poll_events
8722
8723 class SequentialTaskQueue(SlotObject):
8724
8725         __slots__ = ("max_jobs", "running_tasks") + \
8726                 ("_dirty", "_scheduling", "_task_queue")
8727
8728         def __init__(self, **kwargs):
8729                 SlotObject.__init__(self, **kwargs)
8730                 self._task_queue = deque()
8731                 self.running_tasks = set()
8732                 if self.max_jobs is None:
8733                         self.max_jobs = 1
8734                 self._dirty = True
8735
8736         def add(self, task):
8737                 self._task_queue.append(task)
8738                 self._dirty = True
8739
8740         def addFront(self, task):
8741                 self._task_queue.appendleft(task)
8742                 self._dirty = True
8743
8744         def schedule(self):
8745
8746                 if not self._dirty:
8747                         return False
8748
8749                 if not self:
8750                         return False
8751
8752                 if self._scheduling:
8753                         # Ignore any recursive schedule() calls triggered via
8754                         # self._task_exit().
8755                         return False
8756
8757                 self._scheduling = True
8758
8759                 task_queue = self._task_queue
8760                 running_tasks = self.running_tasks
8761                 max_jobs = self.max_jobs
8762                 state_changed = False
8763
8764                 while task_queue and \
8765                         (max_jobs is True or len(running_tasks) < max_jobs):
8766                         task = task_queue.popleft()
8767                         cancelled = getattr(task, "cancelled", None)
8768                         if not cancelled:
8769                                 running_tasks.add(task)
8770                                 task.addExitListener(self._task_exit)
8771                                 task.start()
8772                         state_changed = True
8773
8774                 self._dirty = False
8775                 self._scheduling = False
8776
8777                 return state_changed
8778
8779         def _task_exit(self, task):
8780                 """
8781                 Since we can always rely on exit listeners being called, the set of
8782                 running tasks is always pruned automatically and there is never any need
8783                 to actively prune it.
8784                 """
8785                 self.running_tasks.remove(task)
8786                 if self._task_queue:
8787                         self._dirty = True
8788
8789         def clear(self):
8790                 self._task_queue.clear()
8791                 running_tasks = self.running_tasks
8792                 while running_tasks:
8793                         task = running_tasks.pop()
8794                         task.removeExitListener(self._task_exit)
8795                         task.cancel()
8796                 self._dirty = False
8797
8798         def __nonzero__(self):
8799                 return bool(self._task_queue or self.running_tasks)
8800
8801         def __len__(self):
8802                 return len(self._task_queue) + len(self.running_tasks)
8803
8804 _can_poll_device = None
8805
8806 def can_poll_device():
8807         """
8808         Test if it's possible to use poll() on a device such as a pty. This
8809         is known to fail on Darwin.
8810         @rtype: bool
8811         @returns: True if poll() on a device succeeds, False otherwise.
8812         """
8813
8814         global _can_poll_device
8815         if _can_poll_device is not None:
8816                 return _can_poll_device
8817
8818         if not hasattr(select, "poll"):
8819                 _can_poll_device = False
8820                 return _can_poll_device
8821
8822         try:
8823                 dev_null = open('/dev/null', 'rb')
8824         except IOError:
8825                 _can_poll_device = False
8826                 return _can_poll_device
8827
8828         p = select.poll()
8829         p.register(dev_null.fileno(), PollConstants.POLLIN)
8830
8831         invalid_request = False
8832         for f, event in p.poll():
8833                 if event & PollConstants.POLLNVAL:
8834                         invalid_request = True
8835                         break
8836         dev_null.close()
8837
8838         _can_poll_device = not invalid_request
8839         return _can_poll_device
8840
8841 def create_poll_instance():
8842         """
8843         Create an instance of select.poll, or an instance of
8844         PollSelectAdapter there is no poll() implementation or
8845         it is broken somehow.
8846         """
8847         if can_poll_device():
8848                 return select.poll()
8849         return PollSelectAdapter()
8850
8851 class PollScheduler(object):
8852
8853         class _sched_iface_class(SlotObject):
8854                 __slots__ = ("register", "schedule", "unregister")
8855
8856         def __init__(self):
8857                 self._max_jobs = 1
8858                 self._max_load = None
8859                 self._jobs = 0
8860                 self._poll_event_queue = []
8861                 self._poll_event_handlers = {}
8862                 self._poll_event_handler_ids = {}
8863                 # Increment id for each new handler.
8864                 self._event_handler_id = 0
8865                 self._poll_obj = create_poll_instance()
8866                 self._scheduling = False
8867
8868         def _schedule(self):
8869                 """
8870                 Calls _schedule_tasks() and automatically returns early from
8871                 any recursive calls to this method that the _schedule_tasks()
8872                 call might trigger. This makes _schedule() safe to call from
8873                 inside exit listeners.
8874                 """
8875                 if self._scheduling:
8876                         return False
8877                 self._scheduling = True
8878                 try:
8879                         return self._schedule_tasks()
8880                 finally:
8881                         self._scheduling = False
8882
8883         def _running_job_count(self):
8884                 return self._jobs
8885
8886         def _can_add_job(self):
8887                 max_jobs = self._max_jobs
8888                 max_load = self._max_load
8889
8890                 if self._max_jobs is not True and \
8891                         self._running_job_count() >= self._max_jobs:
8892                         return False
8893
8894                 if max_load is not None and \
8895                         (max_jobs is True or max_jobs > 1) and \
8896                         self._running_job_count() >= 1:
8897                         try:
8898                                 avg1, avg5, avg15 = os.getloadavg()
8899                         except (AttributeError, OSError), e:
8900                                 writemsg("!!! getloadavg() failed: %s\n" % (e,),
8901                                         noiselevel=-1)
8902                                 del e
8903                                 return False
8904
8905                         if avg1 >= max_load:
8906                                 return False
8907
8908                 return True
8909
8910         def _poll(self, timeout=None):
8911                 """
8912                 All poll() calls pass through here. The poll events
8913                 are added directly to self._poll_event_queue.
8914                 In order to avoid endless blocking, this raises
8915                 StopIteration if timeout is None and there are
8916                 no file descriptors to poll.
8917                 """
8918                 if not self._poll_event_handlers:
8919                         self._schedule()
8920                         if timeout is None and \
8921                                 not self._poll_event_handlers:
8922                                 raise StopIteration(
8923                                         "timeout is None and there are no poll() event handlers")
8924
8925                 # The following error is known to occur with Linux kernel versions
8926                 # less than 2.6.24:
8927                 #
8928                 #   select.error: (4, 'Interrupted system call')
8929                 #
8930                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
8931                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
8932                 # without any events.
8933                 while True:
8934                         try:
8935                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
8936                                 break
8937                         except select.error, e:
8938                                 writemsg_level("\n!!! select error: %s\n" % (e,),
8939                                         level=logging.ERROR, noiselevel=-1)
8940                                 del e
8941                                 if timeout is not None:
8942                                         break
8943
8944         def _next_poll_event(self, timeout=None):
8945                 """
8946                 Since the _schedule_wait() loop is called by event
8947                 handlers from _poll_loop(), maintain a central event
8948                 queue for both of them to share events from a single
8949                 poll() call. In order to avoid endless blocking, this
8950                 raises StopIteration if timeout is None and there are
8951                 no file descriptors to poll.
8952                 """
8953                 if not self._poll_event_queue:
8954                         self._poll(timeout)
8955                 return self._poll_event_queue.pop()
8956
8957         def _poll_loop(self):
8958
8959                 event_handlers = self._poll_event_handlers
8960                 event_handled = False
8961
8962                 try:
8963                         while event_handlers:
8964                                 f, event = self._next_poll_event()
8965                                 handler, reg_id = event_handlers[f]
8966                                 handler(f, event)
8967                                 event_handled = True
8968                 except StopIteration:
8969                         event_handled = True
8970
8971                 if not event_handled:
8972                         raise AssertionError("tight loop")
8973
8974         def _schedule_yield(self):
8975                 """
8976                 Schedule for a short period of time chosen by the scheduler based
8977                 on internal state. Synchronous tasks should call this periodically
8978                 in order to allow the scheduler to service pending poll events. The
8979                 scheduler will call poll() exactly once, without blocking, and any
8980                 resulting poll events will be serviced.
8981                 """
8982                 event_handlers = self._poll_event_handlers
8983                 events_handled = 0
8984
8985                 if not event_handlers:
8986                         return bool(events_handled)
8987
8988                 if not self._poll_event_queue:
8989                         self._poll(0)
8990
8991                 try:
8992                         while event_handlers and self._poll_event_queue:
8993                                 f, event = self._next_poll_event()
8994                                 handler, reg_id = event_handlers[f]
8995                                 handler(f, event)
8996                                 events_handled += 1
8997                 except StopIteration:
8998                         events_handled += 1
8999
9000                 return bool(events_handled)
9001
9002         def _register(self, f, eventmask, handler):
9003                 """
9004                 @rtype: Integer
9005                 @return: A unique registration id, for use in schedule() or
9006                         unregister() calls.
9007                 """
9008                 if f in self._poll_event_handlers:
9009                         raise AssertionError("fd %d is already registered" % f)
9010                 self._event_handler_id += 1
9011                 reg_id = self._event_handler_id
9012                 self._poll_event_handler_ids[reg_id] = f
9013                 self._poll_event_handlers[f] = (handler, reg_id)
9014                 self._poll_obj.register(f, eventmask)
9015                 return reg_id
9016
9017         def _unregister(self, reg_id):
9018                 f = self._poll_event_handler_ids[reg_id]
9019                 self._poll_obj.unregister(f)
9020                 del self._poll_event_handlers[f]
9021                 del self._poll_event_handler_ids[reg_id]
9022
9023         def _schedule_wait(self, wait_ids):
9024                 """
9025                 Schedule until wait_id is not longer registered
9026                 for poll() events.
9027                 @type wait_id: int
9028                 @param wait_id: a task id to wait for
9029                 """
9030                 event_handlers = self._poll_event_handlers
9031                 handler_ids = self._poll_event_handler_ids
9032                 event_handled = False
9033
9034                 if isinstance(wait_ids, int):
9035                         wait_ids = frozenset([wait_ids])
9036
9037                 try:
9038                         while wait_ids.intersection(handler_ids):
9039                                 f, event = self._next_poll_event()
9040                                 handler, reg_id = event_handlers[f]
9041                                 handler(f, event)
9042                                 event_handled = True
9043                 except StopIteration:
9044                         event_handled = True
9045
9046                 return event_handled
9047
9048 class QueueScheduler(PollScheduler):
9049
9050         """
9051         Add instances of SequentialTaskQueue and then call run(). The
9052         run() method returns when no tasks remain.
9053         """
9054
9055         def __init__(self, max_jobs=None, max_load=None):
9056                 PollScheduler.__init__(self)
9057
9058                 if max_jobs is None:
9059                         max_jobs = 1
9060
9061                 self._max_jobs = max_jobs
9062                 self._max_load = max_load
9063                 self.sched_iface = self._sched_iface_class(
9064                         register=self._register,
9065                         schedule=self._schedule_wait,
9066                         unregister=self._unregister)
9067
9068                 self._queues = []
9069                 self._schedule_listeners = []
9070
9071         def add(self, q):
9072                 self._queues.append(q)
9073
9074         def remove(self, q):
9075                 self._queues.remove(q)
9076
9077         def run(self):
9078
9079                 while self._schedule():
9080                         self._poll_loop()
9081
9082                 while self._running_job_count():
9083                         self._poll_loop()
9084
9085         def _schedule_tasks(self):
9086                 """
9087                 @rtype: bool
9088                 @returns: True if there may be remaining tasks to schedule,
9089                         False otherwise.
9090                 """
9091                 while self._can_add_job():
9092                         n = self._max_jobs - self._running_job_count()
9093                         if n < 1:
9094                                 break
9095
9096                         if not self._start_next_job(n):
9097                                 return False
9098
9099                 for q in self._queues:
9100                         if q:
9101                                 return True
9102                 return False
9103
9104         def _running_job_count(self):
9105                 job_count = 0
9106                 for q in self._queues:
9107                         job_count += len(q.running_tasks)
9108                 self._jobs = job_count
9109                 return job_count
9110
9111         def _start_next_job(self, n=1):
9112                 started_count = 0
9113                 for q in self._queues:
9114                         initial_job_count = len(q.running_tasks)
9115                         q.schedule()
9116                         final_job_count = len(q.running_tasks)
9117                         if final_job_count > initial_job_count:
9118                                 started_count += (final_job_count - initial_job_count)
9119                         if started_count >= n:
9120                                 break
9121                 return started_count
9122
9123 class TaskScheduler(object):
9124
9125         """
9126         A simple way to handle scheduling of AsynchrousTask instances. Simply
9127         add tasks and call run(). The run() method returns when no tasks remain.
9128         """
9129
9130         def __init__(self, max_jobs=None, max_load=None):
9131                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9132                 self._scheduler = QueueScheduler(
9133                         max_jobs=max_jobs, max_load=max_load)
9134                 self.sched_iface = self._scheduler.sched_iface
9135                 self.run = self._scheduler.run
9136                 self._scheduler.add(self._queue)
9137
9138         def add(self, task):
9139                 self._queue.add(task)
9140
9141 class JobStatusDisplay(object):
9142
9143         _bound_properties = ("curval", "failed", "running")
9144         _jobs_column_width = 48
9145
9146         # Don't update the display unless at least this much
9147         # time has passed, in units of seconds.
9148         _min_display_latency = 2
9149
9150         _default_term_codes = {
9151                 'cr'  : '\r',
9152                 'el'  : '\x1b[K',
9153                 'nel' : '\n',
9154         }
9155
9156         _termcap_name_map = {
9157                 'carriage_return' : 'cr',
9158                 'clr_eol'         : 'el',
9159                 'newline'         : 'nel',
9160         }
9161
9162         def __init__(self, out=sys.stdout, quiet=False):
9163                 object.__setattr__(self, "out", out)
9164                 object.__setattr__(self, "quiet", quiet)
9165                 object.__setattr__(self, "maxval", 0)
9166                 object.__setattr__(self, "merges", 0)
9167                 object.__setattr__(self, "_changed", False)
9168                 object.__setattr__(self, "_displayed", False)
9169                 object.__setattr__(self, "_last_display_time", 0)
9170                 object.__setattr__(self, "width", 80)
9171                 self.reset()
9172
9173                 isatty = hasattr(out, "isatty") and out.isatty()
9174                 object.__setattr__(self, "_isatty", isatty)
9175                 if not isatty or not self._init_term():
9176                         term_codes = {}
9177                         for k, capname in self._termcap_name_map.iteritems():
9178                                 term_codes[k] = self._default_term_codes[capname]
9179                         object.__setattr__(self, "_term_codes", term_codes)
9180
9181         def _init_term(self):
9182                 """
9183                 Initialize term control codes.
9184                 @rtype: bool
9185                 @returns: True if term codes were successfully initialized,
9186                         False otherwise.
9187                 """
9188
9189                 term_type = os.environ.get("TERM", "vt100")
9190                 tigetstr = None
9191
9192                 try:
9193                         import curses
9194                         try:
9195                                 curses.setupterm(term_type, self.out.fileno())
9196                                 tigetstr = curses.tigetstr
9197                         except curses.error:
9198                                 pass
9199                 except ImportError:
9200                         pass
9201
9202                 if tigetstr is None:
9203                         return False
9204
9205                 term_codes = {}
9206                 for k, capname in self._termcap_name_map.iteritems():
9207                         code = tigetstr(capname)
9208                         if code is None:
9209                                 code = self._default_term_codes[capname]
9210                         term_codes[k] = code
9211                 object.__setattr__(self, "_term_codes", term_codes)
9212                 return True
9213
9214         def _format_msg(self, msg):
9215                 return ">>> %s" % msg
9216
9217         def _erase(self):
9218                 self.out.write(
9219                         self._term_codes['carriage_return'] + \
9220                         self._term_codes['clr_eol'])
9221                 self.out.flush()
9222                 self._displayed = False
9223
9224         def _display(self, line):
9225                 self.out.write(line)
9226                 self.out.flush()
9227                 self._displayed = True
9228
9229         def _update(self, msg):
9230
9231                 out = self.out
9232                 if not self._isatty:
9233                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9234                         self.out.flush()
9235                         self._displayed = True
9236                         return
9237
9238                 if self._displayed:
9239                         self._erase()
9240
9241                 self._display(self._format_msg(msg))
9242
9243         def displayMessage(self, msg):
9244
9245                 was_displayed = self._displayed
9246
9247                 if self._isatty and self._displayed:
9248                         self._erase()
9249
9250                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9251                 self.out.flush()
9252                 self._displayed = False
9253
9254                 if was_displayed:
9255                         self._changed = True
9256                         self.display()
9257
9258         def reset(self):
9259                 self.maxval = 0
9260                 self.merges = 0
9261                 for name in self._bound_properties:
9262                         object.__setattr__(self, name, 0)
9263
9264                 if self._displayed:
9265                         self.out.write(self._term_codes['newline'])
9266                         self.out.flush()
9267                         self._displayed = False
9268
9269         def __setattr__(self, name, value):
9270                 old_value = getattr(self, name)
9271                 if value == old_value:
9272                         return
9273                 object.__setattr__(self, name, value)
9274                 if name in self._bound_properties:
9275                         self._property_change(name, old_value, value)
9276
9277         def _property_change(self, name, old_value, new_value):
9278                 self._changed = True
9279                 self.display()
9280
9281         def _load_avg_str(self):
9282                 try:
9283                         avg = os.getloadavg()
9284                 except (AttributeError, OSError), e:
9285                         return str(e)
9286
9287                 max_avg = max(avg)
9288
9289                 if max_avg < 10:
9290                         digits = 2
9291                 elif max_avg < 100:
9292                         digits = 1
9293                 else:
9294                         digits = 0
9295
9296                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9297
9298         def display(self):
9299                 """
9300                 Display status on stdout, but only if something has
9301                 changed since the last call.
9302                 """
9303
9304                 if self.quiet:
9305                         return
9306
9307                 current_time = time.time()
9308                 time_delta = current_time - self._last_display_time
9309                 if self._displayed and \
9310                         not self._changed:
9311                         if not self._isatty:
9312                                 return
9313                         if time_delta < self._min_display_latency:
9314                                 return
9315
9316                 self._last_display_time = current_time
9317                 self._changed = False
9318                 self._display_status()
9319
9320         def _display_status(self):
9321                 # Don't use len(self._completed_tasks) here since that also
9322                 # can include uninstall tasks.
9323                 curval_str = str(self.curval)
9324                 maxval_str = str(self.maxval)
9325                 running_str = str(self.running)
9326                 failed_str = str(self.failed)
9327                 load_avg_str = self._load_avg_str()
9328
9329                 color_output = StringIO.StringIO()
9330                 plain_output = StringIO.StringIO()
9331                 style_file = portage.output.ConsoleStyleFile(color_output)
9332                 style_file.write_listener = plain_output
9333                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9334                 style_writer.style_listener = style_file.new_styles
9335                 f = formatter.AbstractFormatter(style_writer)
9336
9337                 number_style = "INFORM"
9338                 f.add_literal_data("Jobs: ")
9339                 f.push_style(number_style)
9340                 f.add_literal_data(curval_str)
9341                 f.pop_style()
9342                 f.add_literal_data(" of ")
9343                 f.push_style(number_style)
9344                 f.add_literal_data(maxval_str)
9345                 f.pop_style()
9346                 f.add_literal_data(" complete")
9347
9348                 if self.running:
9349                         f.add_literal_data(", ")
9350                         f.push_style(number_style)
9351                         f.add_literal_data(running_str)
9352                         f.pop_style()
9353                         f.add_literal_data(" running")
9354
9355                 if self.failed:
9356                         f.add_literal_data(", ")
9357                         f.push_style(number_style)
9358                         f.add_literal_data(failed_str)
9359                         f.pop_style()
9360                         f.add_literal_data(" failed")
9361
9362                 padding = self._jobs_column_width - len(plain_output.getvalue())
9363                 if padding > 0:
9364                         f.add_literal_data(padding * " ")
9365
9366                 f.add_literal_data("Load avg: ")
9367                 f.add_literal_data(load_avg_str)
9368
9369                 # Truncate to fit width, to avoid making the terminal scroll if the
9370                 # line overflows (happens when the load average is large).
9371                 plain_output = plain_output.getvalue()
9372                 if self._isatty and len(plain_output) > self.width:
9373                         # Use plain_output here since it's easier to truncate
9374                         # properly than the color output which contains console
9375                         # color codes.
9376                         self._update(plain_output[:self.width])
9377                 else:
9378                         self._update(color_output.getvalue())
9379
9380                 xtermTitle(" ".join(plain_output.split()))
9381
9382 class Scheduler(PollScheduler):
9383
9384         _opts_ignore_blockers = \
9385                 frozenset(["--buildpkgonly",
9386                 "--fetchonly", "--fetch-all-uri",
9387                 "--nodeps", "--pretend"])
9388
9389         _opts_no_background = \
9390                 frozenset(["--pretend",
9391                 "--fetchonly", "--fetch-all-uri"])
9392
9393         _opts_no_restart = frozenset(["--buildpkgonly",
9394                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9395
9396         _bad_resume_opts = set(["--ask", "--changelog",
9397                 "--resume", "--skipfirst"])
9398
9399         _fetch_log = "/var/log/emerge-fetch.log"
9400
9401         class _iface_class(SlotObject):
9402                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9403                         "dblinkElog", "fetch", "register", "schedule",
9404                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9405                         "unregister")
9406
9407         class _fetch_iface_class(SlotObject):
9408                 __slots__ = ("log_file", "schedule")
9409
9410         _task_queues_class = slot_dict_class(
9411                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9412
9413         class _build_opts_class(SlotObject):
9414                 __slots__ = ("buildpkg", "buildpkgonly",
9415                         "fetch_all_uri", "fetchonly", "pretend")
9416
9417         class _binpkg_opts_class(SlotObject):
9418                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9419
9420         class _pkg_count_class(SlotObject):
9421                 __slots__ = ("curval", "maxval")
9422
9423         class _emerge_log_class(SlotObject):
9424                 __slots__ = ("xterm_titles",)
9425
9426                 def log(self, *pargs, **kwargs):
9427                         if not self.xterm_titles:
9428                                 # Avoid interference with the scheduler's status display.
9429                                 kwargs.pop("short_msg", None)
9430                         emergelog(self.xterm_titles, *pargs, **kwargs)
9431
9432         class _failed_pkg(SlotObject):
9433                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9434
9435         class _ConfigPool(object):
9436                 """Interface for a task to temporarily allocate a config
9437                 instance from a pool. This allows a task to be constructed
9438                 long before the config instance actually becomes needed, like
9439                 when prefetchers are constructed for the whole merge list."""
9440                 __slots__ = ("_root", "_allocate", "_deallocate")
9441                 def __init__(self, root, allocate, deallocate):
9442                         self._root = root
9443                         self._allocate = allocate
9444                         self._deallocate = deallocate
9445                 def allocate(self):
9446                         return self._allocate(self._root)
9447                 def deallocate(self, settings):
9448                         self._deallocate(settings)
9449
9450         class _unknown_internal_error(portage.exception.PortageException):
9451                 """
9452                 Used internally to terminate scheduling. The specific reason for
9453                 the failure should have been dumped to stderr.
9454                 """
9455                 def __init__(self, value=""):
9456                         portage.exception.PortageException.__init__(self, value)
9457
9458         def __init__(self, settings, trees, mtimedb, myopts,
9459                 spinner, mergelist, favorites, digraph):
9460                 PollScheduler.__init__(self)
9461                 self.settings = settings
9462                 self.target_root = settings["ROOT"]
9463                 self.trees = trees
9464                 self.myopts = myopts
9465                 self._spinner = spinner
9466                 self._mtimedb = mtimedb
9467                 self._mergelist = mergelist
9468                 self._favorites = favorites
9469                 self._args_set = InternalPackageSet(favorites)
9470                 self._build_opts = self._build_opts_class()
9471                 for k in self._build_opts.__slots__:
9472                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9473                 self._binpkg_opts = self._binpkg_opts_class()
9474                 for k in self._binpkg_opts.__slots__:
9475                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9476
9477                 self.curval = 0
9478                 self._logger = self._emerge_log_class()
9479                 self._task_queues = self._task_queues_class()
9480                 for k in self._task_queues.allowed_keys:
9481                         setattr(self._task_queues, k,
9482                                 SequentialTaskQueue())
9483                 self._status_display = JobStatusDisplay()
9484                 self._max_load = myopts.get("--load-average")
9485                 max_jobs = myopts.get("--jobs")
9486                 if max_jobs is None:
9487                         max_jobs = 1
9488                 self._set_max_jobs(max_jobs)
9489
9490                 # The root where the currently running
9491                 # portage instance is installed.
9492                 self._running_root = trees["/"]["root_config"]
9493                 self.edebug = 0
9494                 if settings.get("PORTAGE_DEBUG", "") == "1":
9495                         self.edebug = 1
9496                 self.pkgsettings = {}
9497                 self._config_pool = {}
9498                 self._blocker_db = {}
9499                 for root in trees:
9500                         self._config_pool[root] = []
9501                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
9502
9503                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9504                         schedule=self._schedule_fetch)
9505                 self._sched_iface = self._iface_class(
9506                         dblinkEbuildPhase=self._dblink_ebuild_phase,
9507                         dblinkDisplayMerge=self._dblink_display_merge,
9508                         dblinkElog=self._dblink_elog,
9509                         fetch=fetch_iface, register=self._register,
9510                         schedule=self._schedule_wait,
9511                         scheduleSetup=self._schedule_setup,
9512                         scheduleUnpack=self._schedule_unpack,
9513                         scheduleYield=self._schedule_yield,
9514                         unregister=self._unregister)
9515
9516                 self._prefetchers = weakref.WeakValueDictionary()
9517                 self._pkg_queue = []
9518                 self._completed_tasks = set()
9519
9520                 self._failed_pkgs = []
9521                 self._failed_pkgs_all = []
9522                 self._failed_pkgs_die_msgs = []
9523                 self._post_mod_echo_msgs = []
9524                 self._parallel_fetch = False
9525                 merge_count = len([x for x in mergelist \
9526                         if isinstance(x, Package) and x.operation == "merge"])
9527                 self._pkg_count = self._pkg_count_class(
9528                         curval=0, maxval=merge_count)
9529                 self._status_display.maxval = self._pkg_count.maxval
9530
9531                 # The load average takes some time to respond when new
9532                 # jobs are added, so we need to limit the rate of adding
9533                 # new jobs.
9534                 self._job_delay_max = 10
9535                 self._job_delay_factor = 1.0
9536                 self._job_delay_exp = 1.5
9537                 self._previous_job_start_time = None
9538
9539                 self._set_digraph(digraph)
9540
9541                 # This is used to memoize the _choose_pkg() result when
9542                 # no packages can be chosen until one of the existing
9543                 # jobs completes.
9544                 self._choose_pkg_return_early = False
9545
9546                 features = self.settings.features
9547                 if "parallel-fetch" in features and \
9548                         not ("--pretend" in self.myopts or \
9549                         "--fetch-all-uri" in self.myopts or \
9550                         "--fetchonly" in self.myopts):
9551                         if "distlocks" not in features:
9552                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9553                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
9554                                         "requires the distlocks feature enabled"+"\n",
9555                                         noiselevel=-1)
9556                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
9557                                         "thus parallel-fetching is being disabled"+"\n",
9558                                         noiselevel=-1)
9559                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9560                         elif len(mergelist) > 1:
9561                                 self._parallel_fetch = True
9562
9563                 if self._parallel_fetch:
9564                                 # clear out existing fetch log if it exists
9565                                 try:
9566                                         open(self._fetch_log, 'w')
9567                                 except EnvironmentError:
9568                                         pass
9569
9570                 self._running_portage = None
9571                 portage_match = self._running_root.trees["vartree"].dbapi.match(
9572                         portage.const.PORTAGE_PACKAGE_ATOM)
9573                 if portage_match:
9574                         cpv = portage_match.pop()
9575                         self._running_portage = self._pkg(cpv, "installed",
9576                                 self._running_root, installed=True)
9577
9578         def _poll(self, timeout=None):
9579                 self._schedule()
9580                 PollScheduler._poll(self, timeout=timeout)
9581
9582         def _set_max_jobs(self, max_jobs):
9583                 self._max_jobs = max_jobs
9584                 self._task_queues.jobs.max_jobs = max_jobs
9585
9586         def _background_mode(self):
9587                 """
9588                 Check if background mode is enabled and adjust states as necessary.
9589
9590                 @rtype: bool
9591                 @returns: True if background mode is enabled, False otherwise.
9592                 """
9593                 background = (self._max_jobs is True or \
9594                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
9595                         not bool(self._opts_no_background.intersection(self.myopts))
9596
9597                 if background:
9598                         interactive_tasks = self._get_interactive_tasks()
9599                         if interactive_tasks:
9600                                 background = False
9601                                 writemsg_level(">>> Sending package output to stdio due " + \
9602                                         "to interactive package(s):\n",
9603                                         level=logging.INFO, noiselevel=-1)
9604                                 msg = [""]
9605                                 for pkg in interactive_tasks:
9606                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
9607                                         if pkg.root != "/":
9608                                                 pkg_str += " for " + pkg.root
9609                                         msg.append(pkg_str)
9610                                 msg.append("")
9611                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
9612                                         level=logging.INFO, noiselevel=-1)
9613                                 if self._max_jobs is True or self._max_jobs > 1:
9614                                         self._set_max_jobs(1)
9615                                         writemsg_level(">>> Setting --jobs=1 due " + \
9616                                                 "to the above interactive package(s)\n",
9617                                                 level=logging.INFO, noiselevel=-1)
9618
9619                 self._status_display.quiet = \
9620                         not background or \
9621                         ("--quiet" in self.myopts and \
9622                         "--verbose" not in self.myopts)
9623
9624                 self._logger.xterm_titles = \
9625                         "notitles" not in self.settings.features and \
9626                         self._status_display.quiet
9627
9628                 return background
9629
9630         def _get_interactive_tasks(self):
9631                 from portage import flatten
9632                 from portage.dep import use_reduce, paren_reduce
9633                 interactive_tasks = []
9634                 for task in self._mergelist:
9635                         if not (isinstance(task, Package) and \
9636                                 task.operation == "merge"):
9637                                 continue
9638                         try:
9639                                 properties = flatten(use_reduce(paren_reduce(
9640                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9641                         except portage.exception.InvalidDependString, e:
9642                                 show_invalid_depstring_notice(task,
9643                                         task.metadata["PROPERTIES"], str(e))
9644                                 raise self._unknown_internal_error()
9645                         if "interactive" in properties:
9646                                 interactive_tasks.append(task)
9647                 return interactive_tasks
9648
9649         def _set_digraph(self, digraph):
9650                 if "--nodeps" in self.myopts or \
9651                         (self._max_jobs is not True and self._max_jobs < 2):
9652                         # save some memory
9653                         self._digraph = None
9654                         return
9655
9656                 self._digraph = digraph
9657                 self._prune_digraph()
9658
9659         def _prune_digraph(self):
9660                 """
9661                 Prune any root nodes that are irrelevant.
9662                 """
9663
9664                 graph = self._digraph
9665                 completed_tasks = self._completed_tasks
9666                 removed_nodes = set()
9667                 while True:
9668                         for node in graph.root_nodes():
9669                                 if not isinstance(node, Package) or \
9670                                         (node.installed and node.operation == "nomerge") or \
9671                                         node.onlydeps or \
9672                                         node in completed_tasks:
9673                                         removed_nodes.add(node)
9674                         if removed_nodes:
9675                                 graph.difference_update(removed_nodes)
9676                         if not removed_nodes:
9677                                 break
9678                         removed_nodes.clear()
9679
9680         class _pkg_failure(portage.exception.PortageException):
9681                 """
9682                 An instance of this class is raised by unmerge() when
9683                 an uninstallation fails.
9684                 """
9685                 status = 1
9686                 def __init__(self, *pargs):
9687                         portage.exception.PortageException.__init__(self, pargs)
9688                         if pargs:
9689                                 self.status = pargs[0]
9690
9691         def _schedule_fetch(self, fetcher):
9692                 """
9693                 Schedule a fetcher on the fetch queue, in order to
9694                 serialize access to the fetch log.
9695                 """
9696                 self._task_queues.fetch.addFront(fetcher)
9697
9698         def _schedule_setup(self, setup_phase):
9699                 """
9700                 Schedule a setup phase on the merge queue, in order to
9701                 serialize unsandboxed access to the live filesystem.
9702                 """
9703                 self._task_queues.merge.addFront(setup_phase)
9704                 self._schedule()
9705
9706         def _schedule_unpack(self, unpack_phase):
9707                 """
9708                 Schedule an unpack phase on the unpack queue, in order
9709                 to serialize $DISTDIR access for live ebuilds.
9710                 """
9711                 self._task_queues.unpack.add(unpack_phase)
9712
9713         def _find_blockers(self, new_pkg):
9714                 """
9715                 Returns a callable which should be called only when
9716                 the vdb lock has been acquired.
9717                 """
9718                 def get_blockers():
9719                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
9720                 return get_blockers
9721
9722         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9723                 if self._opts_ignore_blockers.intersection(self.myopts):
9724                         return None
9725
9726                 # Call gc.collect() here to avoid heap overflow that
9727                 # triggers 'Cannot allocate memory' errors (reported
9728                 # with python-2.5).
9729                 import gc
9730                 gc.collect()
9731
9732                 blocker_db = self._blocker_db[new_pkg.root]
9733
9734                 blocker_dblinks = []
9735                 for blocking_pkg in blocker_db.findInstalledBlockers(
9736                         new_pkg, acquire_lock=acquire_lock):
9737                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
9738                                 continue
9739                         if new_pkg.cpv == blocking_pkg.cpv:
9740                                 continue
9741                         blocker_dblinks.append(portage.dblink(
9742                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9743                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9744                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
9745
9746                 gc.collect()
9747
9748                 return blocker_dblinks
9749
9750         def _dblink_pkg(self, pkg_dblink):
9751                 cpv = pkg_dblink.mycpv
9752                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9753                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9754                 installed = type_name == "installed"
9755                 return self._pkg(cpv, type_name, root_config, installed=installed)
9756
9757         def _append_to_log_path(self, log_path, msg):
9758                 f = open(log_path, 'a')
9759                 try:
9760                         f.write(msg)
9761                 finally:
9762                         f.close()
9763
9764         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9765
9766                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9767                 log_file = None
9768                 out = sys.stdout
9769                 background = self._background
9770
9771                 if background and log_path is not None:
9772                         log_file = open(log_path, 'a')
9773                         out = log_file
9774
9775                 try:
9776                         for msg in msgs:
9777                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9778                 finally:
9779                         if log_file is not None:
9780                                 log_file.close()
9781
9782         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9783                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9784                 background = self._background
9785
9786                 if log_path is None:
9787                         if not (background and level < logging.WARN):
9788                                 portage.util.writemsg_level(msg,
9789                                         level=level, noiselevel=noiselevel)
9790                 else:
9791                         if not background:
9792                                 portage.util.writemsg_level(msg,
9793                                         level=level, noiselevel=noiselevel)
9794                         self._append_to_log_path(log_path, msg)
9795
9796         def _dblink_ebuild_phase(self,
9797                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9798                 """
9799                 Using this callback for merge phases allows the scheduler
9800                 to run while these phases execute asynchronously, and allows
9801                 the scheduler control output handling.
9802                 """
9803
9804                 scheduler = self._sched_iface
9805                 settings = pkg_dblink.settings
9806                 pkg = self._dblink_pkg(pkg_dblink)
9807                 background = self._background
9808                 log_path = settings.get("PORTAGE_LOG_FILE")
9809
9810                 ebuild_phase = EbuildPhase(background=background,
9811                         pkg=pkg, phase=phase, scheduler=scheduler,
9812                         settings=settings, tree=pkg_dblink.treetype)
9813                 ebuild_phase.start()
9814                 ebuild_phase.wait()
9815
9816                 return ebuild_phase.returncode
9817
9818         def _check_manifests(self):
9819                 # Verify all the manifests now so that the user is notified of failure
9820                 # as soon as possible.
9821                 if "strict" not in self.settings.features or \
9822                         "--fetchonly" in self.myopts or \
9823                         "--fetch-all-uri" in self.myopts:
9824                         return os.EX_OK
9825
9826                 shown_verifying_msg = False
9827                 quiet_settings = {}
9828                 for myroot, pkgsettings in self.pkgsettings.iteritems():
9829                         quiet_config = portage.config(clone=pkgsettings)
9830                         quiet_config["PORTAGE_QUIET"] = "1"
9831                         quiet_config.backup_changes("PORTAGE_QUIET")
9832                         quiet_settings[myroot] = quiet_config
9833                         del quiet_config
9834
9835                 for x in self._mergelist:
9836                         if not isinstance(x, Package) or \
9837                                 x.type_name != "ebuild":
9838                                 continue
9839
9840                         if not shown_verifying_msg:
9841                                 shown_verifying_msg = True
9842                                 self._status_msg("Verifying ebuild manifests")
9843
9844                         root_config = x.root_config
9845                         portdb = root_config.trees["porttree"].dbapi
9846                         quiet_config = quiet_settings[root_config.root]
9847                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9848                         if not portage.digestcheck([], quiet_config, strict=True):
9849                                 return 1
9850
9851                 return os.EX_OK
9852
9853         def _add_prefetchers(self):
9854
9855                 if not self._parallel_fetch:
9856                         return
9857
9858                 if self._parallel_fetch:
9859                         self._status_msg("Starting parallel fetch")
9860
9861                         prefetchers = self._prefetchers
9862                         getbinpkg = "--getbinpkg" in self.myopts
9863
9864                         # In order to avoid "waiting for lock" messages
9865                         # at the beginning, which annoy users, never
9866                         # spawn a prefetcher for the first package.
9867                         for pkg in self._mergelist[1:]:
9868                                 prefetcher = self._create_prefetcher(pkg)
9869                                 if prefetcher is not None:
9870                                         self._task_queues.fetch.add(prefetcher)
9871                                         prefetchers[pkg] = prefetcher
9872
9873         def _create_prefetcher(self, pkg):
9874                 """
9875                 @return: a prefetcher, or None if not applicable
9876                 """
9877                 prefetcher = None
9878
9879                 if not isinstance(pkg, Package):
9880                         pass
9881
9882                 elif pkg.type_name == "ebuild":
9883
9884                         prefetcher = EbuildFetcher(background=True,
9885                                 config_pool=self._ConfigPool(pkg.root,
9886                                 self._allocate_config, self._deallocate_config),
9887                                 fetchonly=1, logfile=self._fetch_log,
9888                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9889
9890                 elif pkg.type_name == "binary" and \
9891                         "--getbinpkg" in self.myopts and \
9892                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9893
9894                         prefetcher = BinpkgPrefetcher(background=True,
9895                                 pkg=pkg, scheduler=self._sched_iface)
9896
9897                 return prefetcher
9898
9899         def _is_restart_scheduled(self):
9900                 """
9901                 Check if the merge list contains a replacement
9902                 for the current running instance, that will result
9903                 in restart after merge.
9904                 @rtype: bool
9905                 @returns: True if a restart is scheduled, False otherwise.
9906                 """
9907                 if self._opts_no_restart.intersection(self.myopts):
9908                         return False
9909
9910                 mergelist = self._mergelist
9911
9912                 for i, pkg in enumerate(mergelist):
9913                         if self._is_restart_necessary(pkg) and \
9914                                 i != len(mergelist) - 1:
9915                                 return True
9916
9917                 return False
9918
9919         def _is_restart_necessary(self, pkg):
9920                 """
9921                 @return: True if merging the given package
9922                         requires restart, False otherwise.
9923                 """
9924
9925                 # Figure out if we need a restart.
9926                 if pkg.root == self._running_root.root and \
9927                         portage.match_from_list(
9928                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9929                         if self._running_portage:
9930                                 return pkg.cpv != self._running_portage.cpv
9931                         return True
9932                 return False
9933
9934         def _restart_if_necessary(self, pkg):
9935                 """
9936                 Use execv() to restart emerge. This happens
9937                 if portage upgrades itself and there are
9938                 remaining packages in the list.
9939                 """
9940
9941                 if self._opts_no_restart.intersection(self.myopts):
9942                         return
9943
9944                 if not self._is_restart_necessary(pkg):
9945                         return
9946
9947                 if pkg == self._mergelist[-1]:
9948                         return
9949
9950                 self._main_loop_cleanup()
9951
9952                 logger = self._logger
9953                 pkg_count = self._pkg_count
9954                 mtimedb = self._mtimedb
9955                 bad_resume_opts = self._bad_resume_opts
9956
9957                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
9958                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
9959
9960                 logger.log(" *** RESTARTING " + \
9961                         "emerge via exec() after change of " + \
9962                         "portage version.")
9963
9964                 mtimedb["resume"]["mergelist"].remove(list(pkg))
9965                 mtimedb.commit()
9966                 portage.run_exitfuncs()
9967                 mynewargv = [sys.argv[0], "--resume"]
9968                 resume_opts = self.myopts.copy()
9969                 # For automatic resume, we need to prevent
9970                 # any of bad_resume_opts from leaking in
9971                 # via EMERGE_DEFAULT_OPTS.
9972                 resume_opts["--ignore-default-opts"] = True
9973                 for myopt, myarg in resume_opts.iteritems():
9974                         if myopt not in bad_resume_opts:
9975                                 if myarg is True:
9976                                         mynewargv.append(myopt)
9977                                 else:
9978                                         mynewargv.append(myopt +"="+ str(myarg))
9979                 # priority only needs to be adjusted on the first run
9980                 os.environ["PORTAGE_NICENESS"] = "0"
9981                 os.execv(mynewargv[0], mynewargv)
9982
9983         def merge(self):
9984
9985                 if "--resume" in self.myopts:
9986                         # We're resuming.
9987                         portage.writemsg_stdout(
9988                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9989                         self._logger.log(" *** Resuming merge...")
9990
9991                 self._save_resume_list()
9992
9993                 try:
9994                         self._background = self._background_mode()
9995                 except self._unknown_internal_error:
9996                         return 1
9997
9998                 for root in self.trees:
9999                         root_config = self.trees[root]["root_config"]
10000
10001                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10002                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10003                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10004                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10005                         if not tmpdir or not os.path.isdir(tmpdir):
10006                                 msg = "The directory specified in your " + \
10007                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10008                                 "does not exist. Please create this " + \
10009                                 "directory or correct your PORTAGE_TMPDIR setting."
10010                                 msg = textwrap.wrap(msg, 70)
10011                                 out = portage.output.EOutput()
10012                                 for l in msg:
10013                                         out.eerror(l)
10014                                 return 1
10015
10016                         if self._background:
10017                                 root_config.settings.unlock()
10018                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10019                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10020                                 root_config.settings.lock()
10021
10022                         self.pkgsettings[root] = portage.config(
10023                                 clone=root_config.settings)
10024
10025                 rval = self._check_manifests()
10026                 if rval != os.EX_OK:
10027                         return rval
10028
10029                 keep_going = "--keep-going" in self.myopts
10030                 fetchonly = self._build_opts.fetchonly
10031                 mtimedb = self._mtimedb
10032                 failed_pkgs = self._failed_pkgs
10033
10034                 while True:
10035                         rval = self._merge()
10036                         if rval == os.EX_OK or fetchonly or not keep_going:
10037                                 break
10038                         if "resume" not in mtimedb:
10039                                 break
10040                         mergelist = self._mtimedb["resume"].get("mergelist")
10041                         if not mergelist:
10042                                 break
10043
10044                         if not failed_pkgs:
10045                                 break
10046
10047                         for failed_pkg in failed_pkgs:
10048                                 mergelist.remove(list(failed_pkg.pkg))
10049
10050                         self._failed_pkgs_all.extend(failed_pkgs)
10051                         del failed_pkgs[:]
10052
10053                         if not mergelist:
10054                                 break
10055
10056                         if not self._calc_resume_list():
10057                                 break
10058
10059                         clear_caches(self.trees)
10060                         if not self._mergelist:
10061                                 break
10062
10063                         self._save_resume_list()
10064                         self._pkg_count.curval = 0
10065                         self._pkg_count.maxval = len([x for x in self._mergelist \
10066                                 if isinstance(x, Package) and x.operation == "merge"])
10067                         self._status_display.maxval = self._pkg_count.maxval
10068
10069                 self._logger.log(" *** Finished. Cleaning up...")
10070
10071                 if failed_pkgs:
10072                         self._failed_pkgs_all.extend(failed_pkgs)
10073                         del failed_pkgs[:]
10074
10075                 background = self._background
10076                 failure_log_shown = False
10077                 if background and len(self._failed_pkgs_all) == 1:
10078                         # If only one package failed then just show it's
10079                         # whole log for easy viewing.
10080                         failed_pkg = self._failed_pkgs_all[-1]
10081                         build_dir = failed_pkg.build_dir
10082                         log_file = None
10083
10084                         log_paths = [failed_pkg.build_log]
10085
10086                         log_path = self._locate_failure_log(failed_pkg)
10087                         if log_path is not None:
10088                                 try:
10089                                         log_file = open(log_path, 'rb')
10090                                 except IOError:
10091                                         pass
10092
10093                         if log_file is not None:
10094                                 try:
10095                                         for line in log_file:
10096                                                 writemsg_level(line, noiselevel=-1)
10097                                 finally:
10098                                         log_file.close()
10099                                 failure_log_shown = True
10100
10101                 # Dump mod_echo output now since it tends to flood the terminal.
10102                 # This allows us to avoid having more important output, generated
10103                 # later, from being swept away by the mod_echo output.
10104                 mod_echo_output =  _flush_elog_mod_echo()
10105
10106                 if background and not failure_log_shown and \
10107                         self._failed_pkgs_all and \
10108                         self._failed_pkgs_die_msgs and \
10109                         not mod_echo_output:
10110
10111                         printer = portage.output.EOutput()
10112                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10113                                 root_msg = ""
10114                                 if mysettings["ROOT"] != "/":
10115                                         root_msg = " merged to %s" % mysettings["ROOT"]
10116                                 print
10117                                 printer.einfo("Error messages for package %s%s:" % \
10118                                         (colorize("INFORM", key), root_msg))
10119                                 print
10120                                 for phase in portage.const.EBUILD_PHASES:
10121                                         if phase not in logentries:
10122                                                 continue
10123                                         for msgtype, msgcontent in logentries[phase]:
10124                                                 if isinstance(msgcontent, basestring):
10125                                                         msgcontent = [msgcontent]
10126                                                 for line in msgcontent:
10127                                                         printer.eerror(line.strip("\n"))
10128
10129                 if self._post_mod_echo_msgs:
10130                         for msg in self._post_mod_echo_msgs:
10131                                 msg()
10132
10133                 if len(self._failed_pkgs_all) > 1:
10134                         msg = "The following packages have " + \
10135                                 "failed to build or install:"
10136                         prefix = bad(" * ")
10137                         writemsg(prefix + "\n", noiselevel=-1)
10138                         from textwrap import wrap
10139                         for line in wrap(msg, 72):
10140                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10141                         writemsg(prefix + "\n", noiselevel=-1)
10142                         for failed_pkg in self._failed_pkgs_all:
10143                                 writemsg("%s\t%s\n" % (prefix,
10144                                         colorize("INFORM", str(failed_pkg.pkg))),
10145                                         noiselevel=-1)
10146                         writemsg(prefix + "\n", noiselevel=-1)
10147
10148                 return rval
10149
10150         def _elog_listener(self, mysettings, key, logentries, fulltext):
10151                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10152                 if errors:
10153                         self._failed_pkgs_die_msgs.append(
10154                                 (mysettings, key, errors))
10155
10156         def _locate_failure_log(self, failed_pkg):
10157
10158                 build_dir = failed_pkg.build_dir
10159                 log_file = None
10160
10161                 log_paths = [failed_pkg.build_log]
10162
10163                 for log_path in log_paths:
10164                         if not log_path:
10165                                 continue
10166
10167                         try:
10168                                 log_size = os.stat(log_path).st_size
10169                         except OSError:
10170                                 continue
10171
10172                         if log_size == 0:
10173                                 continue
10174
10175                         return log_path
10176
10177                 return None
10178
10179         def _add_packages(self):
10180                 pkg_queue = self._pkg_queue
10181                 for pkg in self._mergelist:
10182                         if isinstance(pkg, Package):
10183                                 pkg_queue.append(pkg)
10184                         elif isinstance(pkg, Blocker):
10185                                 pass
10186
10187         def _merge_exit(self, merge):
10188                 self._do_merge_exit(merge)
10189                 self._deallocate_config(merge.merge.settings)
10190                 if merge.returncode == os.EX_OK and \
10191                         not merge.merge.pkg.installed:
10192                         self._status_display.curval += 1
10193                 self._status_display.merges = len(self._task_queues.merge)
10194                 self._schedule()
10195
10196         def _do_merge_exit(self, merge):
10197                 pkg = merge.merge.pkg
10198                 if merge.returncode != os.EX_OK:
10199                         settings = merge.merge.settings
10200                         build_dir = settings.get("PORTAGE_BUILDDIR")
10201                         build_log = settings.get("PORTAGE_LOG_FILE")
10202
10203                         self._failed_pkgs.append(self._failed_pkg(
10204                                 build_dir=build_dir, build_log=build_log,
10205                                 pkg=pkg,
10206                                 returncode=merge.returncode))
10207                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10208
10209                         self._status_display.failed = len(self._failed_pkgs)
10210                         return
10211
10212                 self._task_complete(pkg)
10213                 pkg_to_replace = merge.merge.pkg_to_replace
10214                 if pkg_to_replace is not None:
10215                         # When a package is replaced, mark it's uninstall
10216                         # task complete (if any).
10217                         uninst_hash_key = \
10218                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10219                         self._task_complete(uninst_hash_key)
10220
10221                 if pkg.installed:
10222                         return
10223
10224                 self._restart_if_necessary(pkg)
10225
10226                 # Call mtimedb.commit() after each merge so that
10227                 # --resume still works after being interrupted
10228                 # by reboot, sigkill or similar.
10229                 mtimedb = self._mtimedb
10230                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10231                 if not mtimedb["resume"]["mergelist"]:
10232                         del mtimedb["resume"]
10233                 mtimedb.commit()
10234
10235         def _build_exit(self, build):
10236                 if build.returncode == os.EX_OK:
10237                         self.curval += 1
10238                         merge = PackageMerge(merge=build)
10239                         merge.addExitListener(self._merge_exit)
10240                         self._task_queues.merge.add(merge)
10241                         self._status_display.merges = len(self._task_queues.merge)
10242                 else:
10243                         settings = build.settings
10244                         build_dir = settings.get("PORTAGE_BUILDDIR")
10245                         build_log = settings.get("PORTAGE_LOG_FILE")
10246
10247                         self._failed_pkgs.append(self._failed_pkg(
10248                                 build_dir=build_dir, build_log=build_log,
10249                                 pkg=build.pkg,
10250                                 returncode=build.returncode))
10251                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10252
10253                         self._status_display.failed = len(self._failed_pkgs)
10254                         self._deallocate_config(build.settings)
10255                 self._jobs -= 1
10256                 self._status_display.running = self._jobs
10257                 self._schedule()
10258
10259         def _extract_exit(self, build):
10260                 self._build_exit(build)
10261
10262         def _task_complete(self, pkg):
10263                 self._completed_tasks.add(pkg)
10264                 self._choose_pkg_return_early = False
10265
10266         def _merge(self):
10267
10268                 self._add_prefetchers()
10269                 self._add_packages()
10270                 pkg_queue = self._pkg_queue
10271                 failed_pkgs = self._failed_pkgs
10272                 portage.locks._quiet = self._background
10273                 portage.elog._emerge_elog_listener = self._elog_listener
10274                 rval = os.EX_OK
10275
10276                 try:
10277                         self._main_loop()
10278                 finally:
10279                         self._main_loop_cleanup()
10280                         portage.locks._quiet = False
10281                         portage.elog._emerge_elog_listener = None
10282                         if failed_pkgs:
10283                                 rval = failed_pkgs[-1].returncode
10284
10285                 return rval
10286
10287         def _main_loop_cleanup(self):
10288                 del self._pkg_queue[:]
10289                 self._completed_tasks.clear()
10290                 self._choose_pkg_return_early = False
10291                 self._status_display.reset()
10292                 self._digraph = None
10293                 self._task_queues.fetch.clear()
10294
10295         def _choose_pkg(self):
10296                 """
10297                 Choose a task that has all it's dependencies satisfied.
10298                 """
10299
10300                 if self._choose_pkg_return_early:
10301                         return None
10302
10303                 if self._digraph is None:
10304                         if (self._jobs or self._task_queues.merge) and \
10305                                 not ("--nodeps" in self.myopts and \
10306                                 (self._max_jobs is True or self._max_jobs > 1)):
10307                                 self._choose_pkg_return_early = True
10308                                 return None
10309                         return self._pkg_queue.pop(0)
10310
10311                 if not (self._jobs or self._task_queues.merge):
10312                         return self._pkg_queue.pop(0)
10313
10314                 self._prune_digraph()
10315
10316                 chosen_pkg = None
10317                 later = set(self._pkg_queue)
10318                 for pkg in self._pkg_queue:
10319                         later.remove(pkg)
10320                         if not self._dependent_on_scheduled_merges(pkg, later):
10321                                 chosen_pkg = pkg
10322                                 break
10323
10324                 if chosen_pkg is not None:
10325                         self._pkg_queue.remove(chosen_pkg)
10326
10327                 if chosen_pkg is None:
10328                         # There's no point in searching for a package to
10329                         # choose until at least one of the existing jobs
10330                         # completes.
10331                         self._choose_pkg_return_early = True
10332
10333                 return chosen_pkg
10334
10335         def _dependent_on_scheduled_merges(self, pkg, later):
10336                 """
10337                 Traverse the subgraph of the given packages deep dependencies
10338                 to see if it contains any scheduled merges.
10339                 @param pkg: a package to check dependencies for
10340                 @type pkg: Package
10341                 @param later: packages for which dependence should be ignored
10342                         since they will be merged later than pkg anyway and therefore
10343                         delaying the merge of pkg will not result in a more optimal
10344                         merge order
10345                 @type later: set
10346                 @rtype: bool
10347                 @returns: True if the package is dependent, False otherwise.
10348                 """
10349
10350                 graph = self._digraph
10351                 completed_tasks = self._completed_tasks
10352
10353                 dependent = False
10354                 traversed_nodes = set([pkg])
10355                 direct_deps = graph.child_nodes(pkg)
10356                 node_stack = direct_deps
10357                 direct_deps = frozenset(direct_deps)
10358                 while node_stack:
10359                         node = node_stack.pop()
10360                         if node in traversed_nodes:
10361                                 continue
10362                         traversed_nodes.add(node)
10363                         if not ((node.installed and node.operation == "nomerge") or \
10364                                 (node.operation == "uninstall" and \
10365                                 node not in direct_deps) or \
10366                                 node in completed_tasks or \
10367                                 node in later):
10368                                 dependent = True
10369                                 break
10370                         node_stack.extend(graph.child_nodes(node))
10371
10372                 return dependent
10373
10374         def _allocate_config(self, root):
10375                 """
10376                 Allocate a unique config instance for a task in order
10377                 to prevent interference between parallel tasks.
10378                 """
10379                 if self._config_pool[root]:
10380                         temp_settings = self._config_pool[root].pop()
10381                 else:
10382                         temp_settings = portage.config(clone=self.pkgsettings[root])
10383                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10384                 # performance reasons, call it here to make sure all settings from the
10385                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10386                 temp_settings.reload()
10387                 temp_settings.reset()
10388                 return temp_settings
10389
10390         def _deallocate_config(self, settings):
10391                 self._config_pool[settings["ROOT"]].append(settings)
10392
10393         def _main_loop(self):
10394
10395                 # Only allow 1 job max if a restart is scheduled
10396                 # due to portage update.
10397                 if self._is_restart_scheduled() or \
10398                         self._opts_no_background.intersection(self.myopts):
10399                         self._set_max_jobs(1)
10400
10401                 merge_queue = self._task_queues.merge
10402
10403                 while self._schedule():
10404                         if self._poll_event_handlers:
10405                                 self._poll_loop()
10406
10407                 while True:
10408                         self._schedule()
10409                         if not (self._jobs or merge_queue):
10410                                 break
10411                         if self._poll_event_handlers:
10412                                 self._poll_loop()
10413
10414         def _keep_scheduling(self):
10415                 return bool(self._pkg_queue and \
10416                         not (self._failed_pkgs and not self._build_opts.fetchonly))
10417
10418         def _schedule_tasks(self):
10419                 self._schedule_tasks_imp()
10420                 self._status_display.display()
10421
10422                 state_change = 0
10423                 for q in self._task_queues.values():
10424                         if q.schedule():
10425                                 state_change += 1
10426
10427                 # Cancel prefetchers if they're the only reason
10428                 # the main poll loop is still running.
10429                 if self._failed_pkgs and not self._build_opts.fetchonly and \
10430                         not (self._jobs or self._task_queues.merge) and \
10431                         self._task_queues.fetch:
10432                         self._task_queues.fetch.clear()
10433                         state_change += 1
10434
10435                 if state_change:
10436                         self._schedule_tasks_imp()
10437                         self._status_display.display()
10438
10439                 return self._keep_scheduling()
10440
10441         def _job_delay(self):
10442                 """
10443                 @rtype: bool
10444                 @returns: True if job scheduling should be delayed, False otherwise.
10445                 """
10446
10447                 if self._jobs and self._max_load is not None:
10448
10449                         current_time = time.time()
10450
10451                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10452                         if delay > self._job_delay_max:
10453                                 delay = self._job_delay_max
10454                         if (current_time - self._previous_job_start_time) < delay:
10455                                 return True
10456
10457                 return False
10458
10459         def _schedule_tasks_imp(self):
10460                 """
10461                 @rtype: bool
10462                 @returns: True if state changed, False otherwise.
10463                 """
10464
10465                 state_change = 0
10466
10467                 while True:
10468
10469                         if not self._keep_scheduling():
10470                                 return bool(state_change)
10471
10472                         if self._choose_pkg_return_early or \
10473                                 not self._can_add_job() or \
10474                                 self._job_delay():
10475                                 return bool(state_change)
10476
10477                         pkg = self._choose_pkg()
10478                         if pkg is None:
10479                                 return bool(state_change)
10480
10481                         state_change += 1
10482
10483                         if not pkg.installed:
10484                                 self._pkg_count.curval += 1
10485
10486                         task = self._task(pkg)
10487
10488                         if pkg.installed:
10489                                 merge = PackageMerge(merge=task)
10490                                 merge.addExitListener(self._merge_exit)
10491                                 self._task_queues.merge.add(merge)
10492
10493                         elif pkg.built:
10494                                 self._jobs += 1
10495                                 self._previous_job_start_time = time.time()
10496                                 self._status_display.running = self._jobs
10497                                 task.addExitListener(self._extract_exit)
10498                                 self._task_queues.jobs.add(task)
10499
10500                         else:
10501                                 self._jobs += 1
10502                                 self._previous_job_start_time = time.time()
10503                                 self._status_display.running = self._jobs
10504                                 task.addExitListener(self._build_exit)
10505                                 self._task_queues.jobs.add(task)
10506
10507                 return bool(state_change)
10508
10509         def _task(self, pkg):
10510
10511                 pkg_to_replace = None
10512                 if pkg.operation != "uninstall":
10513                         vardb = pkg.root_config.trees["vartree"].dbapi
10514                         previous_cpv = vardb.match(pkg.slot_atom)
10515                         if previous_cpv:
10516                                 previous_cpv = previous_cpv.pop()
10517                                 pkg_to_replace = self._pkg(previous_cpv,
10518                                         "installed", pkg.root_config, installed=True)
10519
10520                 task = MergeListItem(args_set=self._args_set,
10521                         background=self._background, binpkg_opts=self._binpkg_opts,
10522                         build_opts=self._build_opts,
10523                         config_pool=self._ConfigPool(pkg.root,
10524                         self._allocate_config, self._deallocate_config),
10525                         emerge_opts=self.myopts,
10526                         find_blockers=self._find_blockers(pkg), logger=self._logger,
10527                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10528                         pkg_to_replace=pkg_to_replace,
10529                         prefetcher=self._prefetchers.get(pkg),
10530                         scheduler=self._sched_iface,
10531                         settings=self._allocate_config(pkg.root),
10532                         statusMessage=self._status_msg,
10533                         world_atom=self._world_atom)
10534
10535                 return task
10536
10537         def _failed_pkg_msg(self, failed_pkg, action, preposition):
10538                 pkg = failed_pkg.pkg
10539                 msg = "%s to %s %s" % \
10540                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
10541                 if pkg.root != "/":
10542                         msg += " %s %s" % (preposition, pkg.root)
10543
10544                 log_path = self._locate_failure_log(failed_pkg)
10545                 if log_path is not None:
10546                         msg += ", Log file:"
10547                 self._status_msg(msg)
10548
10549                 if log_path is not None:
10550                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10551
10552         def _status_msg(self, msg):
10553                 """
10554                 Display a brief status message (no newlines) in the status display.
10555                 This is called by tasks to provide feedback to the user. This
10556                 delegates the resposibility of generating \r and \n control characters,
10557                 to guarantee that lines are created or erased when necessary and
10558                 appropriate.
10559
10560                 @type msg: str
10561                 @param msg: a brief status message (no newlines allowed)
10562                 """
10563                 if not self._background:
10564                         writemsg_level("\n")
10565                 self._status_display.displayMessage(msg)
10566
10567         def _save_resume_list(self):
10568                 """
10569                 Do this before verifying the ebuild Manifests since it might
10570                 be possible for the user to use --resume --skipfirst get past
10571                 a non-essential package with a broken digest.
10572                 """
10573                 mtimedb = self._mtimedb
10574                 mtimedb["resume"]["mergelist"] = [list(x) \
10575                         for x in self._mergelist \
10576                         if isinstance(x, Package) and x.operation == "merge"]
10577
10578                 mtimedb.commit()
10579
10580         def _calc_resume_list(self):
10581                 """
10582                 Use the current resume list to calculate a new one,
10583                 dropping any packages with unsatisfied deps.
10584                 @rtype: bool
10585                 @returns: True if successful, False otherwise.
10586                 """
10587                 print colorize("GOOD", "*** Resuming merge...")
10588
10589                 if self._show_list():
10590                         if "--tree" in self.myopts:
10591                                 portage.writemsg_stdout("\n" + \
10592                                         darkgreen("These are the packages that " + \
10593                                         "would be merged, in reverse order:\n\n"))
10594
10595                         else:
10596                                 portage.writemsg_stdout("\n" + \
10597                                         darkgreen("These are the packages that " + \
10598                                         "would be merged, in order:\n\n"))
10599
10600                 show_spinner = "--quiet" not in self.myopts and \
10601                         "--nodeps" not in self.myopts
10602
10603                 if show_spinner:
10604                         print "Calculating dependencies  ",
10605
10606                 myparams = create_depgraph_params(self.myopts, None)
10607                 success = False
10608                 e = None
10609                 try:
10610                         success, mydepgraph, dropped_tasks = resume_depgraph(
10611                                 self.settings, self.trees, self._mtimedb, self.myopts,
10612                                 myparams, self._spinner, skip_unsatisfied=True)
10613                 except depgraph.UnsatisfiedResumeDep, e:
10614                         mydepgraph = e.depgraph
10615                         dropped_tasks = set()
10616
10617                 if show_spinner:
10618                         print "\b\b... done!"
10619
10620                 if e is not None:
10621                         def unsatisfied_resume_dep_msg():
10622                                 mydepgraph.display_problems()
10623                                 out = portage.output.EOutput()
10624                                 out.eerror("One or more packages are either masked or " + \
10625                                         "have missing dependencies:")
10626                                 out.eerror("")
10627                                 indent = "  "
10628                                 show_parents = set()
10629                                 for dep in e.value:
10630                                         if dep.parent in show_parents:
10631                                                 continue
10632                                         show_parents.add(dep.parent)
10633                                         if dep.atom is None:
10634                                                 out.eerror(indent + "Masked package:")
10635                                                 out.eerror(2 * indent + str(dep.parent))
10636                                                 out.eerror("")
10637                                         else:
10638                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
10639                                                 out.eerror(2 * indent + str(dep.parent))
10640                                                 out.eerror("")
10641                                 msg = "The resume list contains packages " + \
10642                                         "that are either masked or have " + \
10643                                         "unsatisfied dependencies. " + \
10644                                         "Please restart/continue " + \
10645                                         "the operation manually, or use --skipfirst " + \
10646                                         "to skip the first package in the list and " + \
10647                                         "any other packages that may be " + \
10648                                         "masked or have missing dependencies."
10649                                 for line in textwrap.wrap(msg, 72):
10650                                         out.eerror(line)
10651                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10652                         return False
10653
10654                 if success and self._show_list():
10655                         mylist = mydepgraph.altlist()
10656                         if mylist:
10657                                 if "--tree" in self.myopts:
10658                                         mylist.reverse()
10659                                 mydepgraph.display(mylist, favorites=self._favorites)
10660
10661                 if not success:
10662                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10663                         return False
10664                 mydepgraph.display_problems()
10665
10666                 mylist = mydepgraph.altlist()
10667                 mydepgraph.break_refs(mylist)
10668                 mydepgraph.break_refs(dropped_tasks)
10669                 self._mergelist = mylist
10670                 self._set_digraph(mydepgraph.schedulerGraph())
10671
10672                 msg_width = 75
10673                 for task in dropped_tasks:
10674                         if not (isinstance(task, Package) and task.operation == "merge"):
10675                                 continue
10676                         pkg = task
10677                         msg = "emerge --keep-going:" + \
10678                                 " %s" % (pkg.cpv,)
10679                         if pkg.root != "/":
10680                                 msg += " for %s" % (pkg.root,)
10681                         msg += " dropped due to unsatisfied dependency."
10682                         for line in textwrap.wrap(msg, msg_width):
10683                                 eerror(line, phase="other", key=pkg.cpv)
10684                         settings = self.pkgsettings[pkg.root]
10685                         # Ensure that log collection from $T is disabled inside
10686                         # elog_process(), since any logs that might exist are
10687                         # not valid here.
10688                         settings.pop("T", None)
10689                         portage.elog.elog_process(pkg.cpv, settings)
10690                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
10691
10692                 return True
10693
10694         def _show_list(self):
10695                 myopts = self.myopts
10696                 if "--quiet" not in myopts and \
10697                         ("--ask" in myopts or "--tree" in myopts or \
10698                         "--verbose" in myopts):
10699                         return True
10700                 return False
10701
10702         def _world_atom(self, pkg):
10703                 """
10704                 Add the package to the world file, but only if
10705                 it's supposed to be added. Otherwise, do nothing.
10706                 """
10707
10708                 if set(("--buildpkgonly", "--fetchonly",
10709                         "--fetch-all-uri",
10710                         "--oneshot", "--onlydeps",
10711                         "--pretend")).intersection(self.myopts):
10712                         return
10713
10714                 if pkg.root != self.target_root:
10715                         return
10716
10717                 args_set = self._args_set
10718                 if not args_set.findAtomForPackage(pkg):
10719                         return
10720
10721                 logger = self._logger
10722                 pkg_count = self._pkg_count
10723                 root_config = pkg.root_config
10724                 world_set = root_config.sets["world"]
10725                 world_locked = False
10726                 if hasattr(world_set, "lock"):
10727                         world_set.lock()
10728                         world_locked = True
10729
10730                 try:
10731                         if hasattr(world_set, "load"):
10732                                 world_set.load() # maybe it's changed on disk
10733
10734                         atom = create_world_atom(pkg, args_set, root_config)
10735                         if atom:
10736                                 if hasattr(world_set, "add"):
10737                                         self._status_msg(('Recording %s in "world" ' + \
10738                                                 'favorites file...') % atom)
10739                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
10740                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10741                                         world_set.add(atom)
10742                                 else:
10743                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10744                                                 (atom,), level=logging.WARN, noiselevel=-1)
10745                 finally:
10746                         if world_locked:
10747                                 world_set.unlock()
10748
10749         def _pkg(self, cpv, type_name, root_config, installed=False):
10750                 """
10751                 Get a package instance from the cache, or create a new
10752                 one if necessary. Raises KeyError from aux_get if it
10753                 failures for some reason (package does not exist or is
10754                 corrupt).
10755                 """
10756                 operation = "merge"
10757                 if installed:
10758                         operation = "nomerge"
10759
10760                 if self._digraph is not None:
10761                         # Reuse existing instance when available.
10762                         pkg = self._digraph.get(
10763                                 (type_name, root_config.root, cpv, operation))
10764                         if pkg is not None:
10765                                 return pkg
10766
10767                 tree_type = depgraph.pkg_tree_map[type_name]
10768                 db = root_config.trees[tree_type].dbapi
10769                 db_keys = list(self.trees[root_config.root][
10770                         tree_type].dbapi._aux_cache_keys)
10771                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10772                 pkg = Package(cpv=cpv, metadata=metadata,
10773                         root_config=root_config, installed=installed)
10774                 if type_name == "ebuild":
10775                         settings = self.pkgsettings[root_config.root]
10776                         settings.setcpv(pkg)
10777                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
10778
10779                 return pkg
10780
10781 class MetadataRegen(PollScheduler):
10782
10783         def __init__(self, portdb, max_jobs=None, max_load=None):
10784                 PollScheduler.__init__(self)
10785                 self._portdb = portdb
10786
10787                 if max_jobs is None:
10788                         max_jobs = 1
10789
10790                 self._max_jobs = max_jobs
10791                 self._max_load = max_load
10792                 self._sched_iface = self._sched_iface_class(
10793                         register=self._register,
10794                         schedule=self._schedule_wait,
10795                         unregister=self._unregister)
10796
10797                 self._valid_pkgs = set()
10798                 self._process_iter = self._iter_metadata_processes()
10799
10800         def _iter_metadata_processes(self):
10801                 portdb = self._portdb
10802                 valid_pkgs = self._valid_pkgs
10803                 every_cp = portdb.cp_all()
10804                 every_cp.sort(reverse=True)
10805
10806                 while every_cp:
10807                         cp = every_cp.pop()
10808                         portage.writemsg_stdout("Processing %s\n" % cp)
10809                         cpv_list = portdb.cp_list(cp)
10810                         for cpv in cpv_list:
10811                                 valid_pkgs.add(cpv)
10812                                 ebuild_path, repo_path = portdb.findname2(cpv)
10813                                 metadata_process = portdb._metadata_process(
10814                                         cpv, ebuild_path, repo_path)
10815                                 if metadata_process is None:
10816                                         continue
10817                                 yield metadata_process
10818
10819         def run(self):
10820
10821                 portdb = self._portdb
10822                 from portage.cache.cache_errors import CacheError
10823                 dead_nodes = {}
10824
10825                 for mytree in portdb.porttrees:
10826                         try:
10827                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10828                         except CacheError, e:
10829                                 portage.writemsg("Error listing cache entries for " + \
10830                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10831                                 del e
10832                                 dead_nodes = None
10833                                 break
10834
10835                 while self._schedule():
10836                         self._poll_loop()
10837
10838                 while self._jobs:
10839                         self._poll_loop()
10840
10841                 if dead_nodes:
10842                         for y in self._valid_pkgs:
10843                                 for mytree in portdb.porttrees:
10844                                         if portdb.findname2(y, mytree=mytree)[0]:
10845                                                 dead_nodes[mytree].discard(y)
10846
10847                         for mytree, nodes in dead_nodes.iteritems():
10848                                 auxdb = portdb.auxdb[mytree]
10849                                 for y in nodes:
10850                                         try:
10851                                                 del auxdb[y]
10852                                         except (KeyError, CacheError):
10853                                                 pass
10854
10855         def _schedule_tasks(self):
10856                 """
10857                 @rtype: bool
10858                 @returns: True if there may be remaining tasks to schedule,
10859                         False otherwise.
10860                 """
10861                 while self._can_add_job():
10862                         try:
10863                                 metadata_process = self._process_iter.next()
10864                         except StopIteration:
10865                                 return False
10866
10867                         self._jobs += 1
10868                         metadata_process.scheduler = self._sched_iface
10869                         metadata_process.addExitListener(self._metadata_exit)
10870                         metadata_process.start()
10871                 return True
10872
10873         def _metadata_exit(self, metadata_process):
10874                 self._jobs -= 1
10875                 if metadata_process.returncode != os.EX_OK:
10876                         self._valid_pkgs.discard(metadata_process.cpv)
10877                         portage.writemsg("Error processing %s, continuing...\n" % \
10878                                 (metadata_process.cpv,))
10879                 self._schedule()
10880
10881 class UninstallFailure(portage.exception.PortageException):
10882         """
10883         An instance of this class is raised by unmerge() when
10884         an uninstallation fails.
10885         """
10886         status = 1
10887         def __init__(self, *pargs):
10888                 portage.exception.PortageException.__init__(self, pargs)
10889                 if pargs:
10890                         self.status = pargs[0]
10891
10892 def unmerge(root_config, myopts, unmerge_action,
10893         unmerge_files, ldpath_mtimes, autoclean=0,
10894         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10895         scheduler=None, writemsg_level=portage.util.writemsg_level):
10896
10897         quiet = "--quiet" in myopts
10898         settings = root_config.settings
10899         sets = root_config.sets
10900         vartree = root_config.trees["vartree"]
10901         candidate_catpkgs=[]
10902         global_unmerge=0
10903         xterm_titles = "notitles" not in settings.features
10904         out = portage.output.EOutput()
10905         pkg_cache = {}
10906         db_keys = list(vartree.dbapi._aux_cache_keys)
10907
10908         def _pkg(cpv):
10909                 pkg = pkg_cache.get(cpv)
10910                 if pkg is None:
10911                         pkg = Package(cpv=cpv, installed=True,
10912                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10913                                 root_config=root_config,
10914                                 type_name="installed")
10915                         pkg_cache[cpv] = pkg
10916                 return pkg
10917
10918         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10919         try:
10920                 # At least the parent needs to exist for the lock file.
10921                 portage.util.ensure_dirs(vdb_path)
10922         except portage.exception.PortageException:
10923                 pass
10924         vdb_lock = None
10925         try:
10926                 if os.access(vdb_path, os.W_OK):
10927                         vdb_lock = portage.locks.lockdir(vdb_path)
10928                 realsyslist = sets["system"].getAtoms()
10929                 syslist = []
10930                 for x in realsyslist:
10931                         mycp = portage.dep_getkey(x)
10932                         if mycp in settings.getvirtuals():
10933                                 providers = []
10934                                 for provider in settings.getvirtuals()[mycp]:
10935                                         if vartree.dbapi.match(provider):
10936                                                 providers.append(provider)
10937                                 if len(providers) == 1:
10938                                         syslist.extend(providers)
10939                         else:
10940                                 syslist.append(mycp)
10941         
10942                 mysettings = portage.config(clone=settings)
10943         
10944                 if not unmerge_files:
10945                         if unmerge_action == "unmerge":
10946                                 print
10947                                 print bold("emerge unmerge") + " can only be used with specific package names"
10948                                 print
10949                                 return 0
10950                         else:
10951                                 global_unmerge = 1
10952         
10953                 localtree = vartree
10954                 # process all arguments and add all
10955                 # valid db entries to candidate_catpkgs
10956                 if global_unmerge:
10957                         if not unmerge_files:
10958                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10959                 else:
10960                         #we've got command-line arguments
10961                         if not unmerge_files:
10962                                 print "\nNo packages to unmerge have been provided.\n"
10963                                 return 0
10964                         for x in unmerge_files:
10965                                 arg_parts = x.split('/')
10966                                 if x[0] not in [".","/"] and \
10967                                         arg_parts[-1][-7:] != ".ebuild":
10968                                         #possible cat/pkg or dep; treat as such
10969                                         candidate_catpkgs.append(x)
10970                                 elif unmerge_action in ["prune","clean"]:
10971                                         print "\n!!! Prune and clean do not accept individual" + \
10972                                                 " ebuilds as arguments;\n    skipping.\n"
10973                                         continue
10974                                 else:
10975                                         # it appears that the user is specifying an installed
10976                                         # ebuild and we're in "unmerge" mode, so it's ok.
10977                                         if not os.path.exists(x):
10978                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
10979                                                 return 0
10980         
10981                                         absx   = os.path.abspath(x)
10982                                         sp_absx = absx.split("/")
10983                                         if sp_absx[-1][-7:] == ".ebuild":
10984                                                 del sp_absx[-1]
10985                                                 absx = "/".join(sp_absx)
10986         
10987                                         sp_absx_len = len(sp_absx)
10988         
10989                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10990                                         vdb_len  = len(vdb_path)
10991         
10992                                         sp_vdb     = vdb_path.split("/")
10993                                         sp_vdb_len = len(sp_vdb)
10994         
10995                                         if not os.path.exists(absx+"/CONTENTS"):
10996                                                 print "!!! Not a valid db dir: "+str(absx)
10997                                                 return 0
10998         
10999                                         if sp_absx_len <= sp_vdb_len:
11000                                                 # The Path is shorter... so it can't be inside the vdb.
11001                                                 print sp_absx
11002                                                 print absx
11003                                                 print "\n!!!",x,"cannot be inside "+ \
11004                                                         vdb_path+"; aborting.\n"
11005                                                 return 0
11006         
11007                                         for idx in range(0,sp_vdb_len):
11008                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11009                                                         print sp_absx
11010                                                         print absx
11011                                                         print "\n!!!", x, "is not inside "+\
11012                                                                 vdb_path+"; aborting.\n"
11013                                                         return 0
11014         
11015                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11016                                         candidate_catpkgs.append(
11017                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11018         
11019                 newline=""
11020                 if (not "--quiet" in myopts):
11021                         newline="\n"
11022                 if settings["ROOT"] != "/":
11023                         writemsg_level(darkgreen(newline+ \
11024                                 ">>> Using system located in ROOT tree %s\n" % \
11025                                 settings["ROOT"]))
11026
11027                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11028                         not ("--quiet" in myopts):
11029                         writemsg_level(darkgreen(newline+\
11030                                 ">>> These are the packages that would be unmerged:\n"))
11031
11032                 # Preservation of order is required for --depclean and --prune so
11033                 # that dependencies are respected. Use all_selected to eliminate
11034                 # duplicate packages since the same package may be selected by
11035                 # multiple atoms.
11036                 pkgmap = []
11037                 all_selected = set()
11038                 for x in candidate_catpkgs:
11039                         # cycle through all our candidate deps and determine
11040                         # what will and will not get unmerged
11041                         try:
11042                                 mymatch = vartree.dbapi.match(x)
11043                         except portage.exception.AmbiguousPackageName, errpkgs:
11044                                 print "\n\n!!! The short ebuild name \"" + \
11045                                         x + "\" is ambiguous.  Please specify"
11046                                 print "!!! one of the following fully-qualified " + \
11047                                         "ebuild names instead:\n"
11048                                 for i in errpkgs[0]:
11049                                         print "    " + green(i)
11050                                 print
11051                                 sys.exit(1)
11052         
11053                         if not mymatch and x[0] not in "<>=~":
11054                                 mymatch = localtree.dep_match(x)
11055                         if not mymatch:
11056                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11057                                         (x, unmerge_action), noiselevel=-1)
11058                                 continue
11059
11060                         pkgmap.append(
11061                                 {"protected": set(), "selected": set(), "omitted": set()})
11062                         mykey = len(pkgmap) - 1
11063                         if unmerge_action=="unmerge":
11064                                         for y in mymatch:
11065                                                 if y not in all_selected:
11066                                                         pkgmap[mykey]["selected"].add(y)
11067                                                         all_selected.add(y)
11068                         elif unmerge_action == "prune":
11069                                 if len(mymatch) == 1:
11070                                         continue
11071                                 best_version = mymatch[0]
11072                                 best_slot = vartree.getslot(best_version)
11073                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11074                                 for mypkg in mymatch[1:]:
11075                                         myslot = vartree.getslot(mypkg)
11076                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11077                                         if (myslot == best_slot and mycounter > best_counter) or \
11078                                                 mypkg == portage.best([mypkg, best_version]):
11079                                                 if myslot == best_slot:
11080                                                         if mycounter < best_counter:
11081                                                                 # On slot collision, keep the one with the
11082                                                                 # highest counter since it is the most
11083                                                                 # recently installed.
11084                                                                 continue
11085                                                 best_version = mypkg
11086                                                 best_slot = myslot
11087                                                 best_counter = mycounter
11088                                 pkgmap[mykey]["protected"].add(best_version)
11089                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11090                                         if mypkg != best_version and mypkg not in all_selected)
11091                                 all_selected.update(pkgmap[mykey]["selected"])
11092                         else:
11093                                 # unmerge_action == "clean"
11094                                 slotmap={}
11095                                 for mypkg in mymatch:
11096                                         if unmerge_action == "clean":
11097                                                 myslot = localtree.getslot(mypkg)
11098                                         else:
11099                                                 # since we're pruning, we don't care about slots
11100                                                 # and put all the pkgs in together
11101                                                 myslot = 0
11102                                         if myslot not in slotmap:
11103                                                 slotmap[myslot] = {}
11104                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11105
11106                                 for mypkg in vartree.dbapi.cp_list(
11107                                         portage.dep_getkey(mymatch[0])):
11108                                         myslot = vartree.getslot(mypkg)
11109                                         if myslot not in slotmap:
11110                                                 slotmap[myslot] = {}
11111                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11112
11113                                 for myslot in slotmap:
11114                                         counterkeys = slotmap[myslot].keys()
11115                                         if not counterkeys:
11116                                                 continue
11117                                         counterkeys.sort()
11118                                         pkgmap[mykey]["protected"].add(
11119                                                 slotmap[myslot][counterkeys[-1]])
11120                                         del counterkeys[-1]
11121
11122                                         for counter in counterkeys[:]:
11123                                                 mypkg = slotmap[myslot][counter]
11124                                                 if mypkg not in mymatch:
11125                                                         counterkeys.remove(counter)
11126                                                         pkgmap[mykey]["protected"].add(
11127                                                                 slotmap[myslot][counter])
11128
11129                                         #be pretty and get them in order of merge:
11130                                         for ckey in counterkeys:
11131                                                 mypkg = slotmap[myslot][ckey]
11132                                                 if mypkg not in all_selected:
11133                                                         pkgmap[mykey]["selected"].add(mypkg)
11134                                                         all_selected.add(mypkg)
11135                                         # ok, now the last-merged package
11136                                         # is protected, and the rest are selected
11137                 numselected = len(all_selected)
11138                 if global_unmerge and not numselected:
11139                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11140                         return 0
11141         
11142                 if not numselected:
11143                         portage.writemsg_stdout(
11144                                 "\n>>> No packages selected for removal by " + \
11145                                 unmerge_action + "\n")
11146                         return 0
11147         finally:
11148                 if vdb_lock:
11149                         vartree.dbapi.flush_cache()
11150                         portage.locks.unlockdir(vdb_lock)
11151         
11152         from portage.sets.base import EditablePackageSet
11153         
11154         # generate a list of package sets that are directly or indirectly listed in "world",
11155         # as there is no persistent list of "installed" sets
11156         installed_sets = ["world"]
11157         stop = False
11158         pos = 0
11159         while not stop:
11160                 stop = True
11161                 pos = len(installed_sets)
11162                 for s in installed_sets[pos - 1:]:
11163                         if s not in sets:
11164                                 continue
11165                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11166                         if candidates:
11167                                 stop = False
11168                                 installed_sets += candidates
11169         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11170         del stop, pos
11171
11172         # we don't want to unmerge packages that are still listed in user-editable package sets
11173         # listed in "world" as they would be remerged on the next update of "world" or the 
11174         # relevant package sets.
11175         unknown_sets = set()
11176         for cp in xrange(len(pkgmap)):
11177                 for cpv in pkgmap[cp]["selected"].copy():
11178                         try:
11179                                 pkg = _pkg(cpv)
11180                         except KeyError:
11181                                 # It could have been uninstalled
11182                                 # by a concurrent process.
11183                                 continue
11184
11185                         if unmerge_action != "clean" and \
11186                                 root_config.root == "/" and \
11187                                 portage.match_from_list(
11188                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11189                                 msg = ("Not unmerging package %s since there is no valid " + \
11190                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11191                                 for line in textwrap.wrap(msg, 75):
11192                                         out.eerror(line)
11193                                 # adjust pkgmap so the display output is correct
11194                                 pkgmap[cp]["selected"].remove(cpv)
11195                                 all_selected.remove(cpv)
11196                                 pkgmap[cp]["protected"].add(cpv)
11197                                 continue
11198
11199                         parents = []
11200                         for s in installed_sets:
11201                                 # skip sets that the user requested to unmerge, and skip world 
11202                                 # unless we're unmerging a package set (as the package would be 
11203                                 # removed from "world" later on)
11204                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11205                                         continue
11206
11207                                 if s not in sets:
11208                                         if s in unknown_sets:
11209                                                 continue
11210                                         unknown_sets.add(s)
11211                                         out = portage.output.EOutput()
11212                                         out.eerror(("Unknown set '@%s' in " + \
11213                                                 "%svar/lib/portage/world_sets") % \
11214                                                 (s, root_config.root))
11215                                         continue
11216
11217                                 # only check instances of EditablePackageSet as other classes are generally used for
11218                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11219                                 # user can't do much about them anyway)
11220                                 if isinstance(sets[s], EditablePackageSet):
11221
11222                                         # This is derived from a snippet of code in the
11223                                         # depgraph._iter_atoms_for_pkg() method.
11224                                         for atom in sets[s].iterAtomsForPackage(pkg):
11225                                                 inst_matches = vartree.dbapi.match(atom)
11226                                                 inst_matches.reverse() # descending order
11227                                                 higher_slot = None
11228                                                 for inst_cpv in inst_matches:
11229                                                         try:
11230                                                                 inst_pkg = _pkg(inst_cpv)
11231                                                         except KeyError:
11232                                                                 # It could have been uninstalled
11233                                                                 # by a concurrent process.
11234                                                                 continue
11235
11236                                                         if inst_pkg.cp != atom.cp:
11237                                                                 continue
11238                                                         if pkg >= inst_pkg:
11239                                                                 # This is descending order, and we're not
11240                                                                 # interested in any versions <= pkg given.
11241                                                                 break
11242                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11243                                                                 higher_slot = inst_pkg
11244                                                                 break
11245                                                 if higher_slot is None:
11246                                                         parents.append(s)
11247                                                         break
11248                         if parents:
11249                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11250                                 #print colorize("WARN", "but still listed in the following package sets:")
11251                                 #print "    %s\n" % ", ".join(parents)
11252                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11253                                 print colorize("WARN", "still referenced by the following package sets:")
11254                                 print "    %s\n" % ", ".join(parents)
11255                                 # adjust pkgmap so the display output is correct
11256                                 pkgmap[cp]["selected"].remove(cpv)
11257                                 all_selected.remove(cpv)
11258                                 pkgmap[cp]["protected"].add(cpv)
11259         
11260         del installed_sets
11261
11262         numselected = len(all_selected)
11263         if not numselected:
11264                 writemsg_level(
11265                         "\n>>> No packages selected for removal by " + \
11266                         unmerge_action + "\n")
11267                 return 0
11268
11269         # Unmerge order only matters in some cases
11270         if not ordered:
11271                 unordered = {}
11272                 for d in pkgmap:
11273                         selected = d["selected"]
11274                         if not selected:
11275                                 continue
11276                         cp = portage.cpv_getkey(iter(selected).next())
11277                         cp_dict = unordered.get(cp)
11278                         if cp_dict is None:
11279                                 cp_dict = {}
11280                                 unordered[cp] = cp_dict
11281                                 for k in d:
11282                                         cp_dict[k] = set()
11283                         for k, v in d.iteritems():
11284                                 cp_dict[k].update(v)
11285                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11286
11287         for x in xrange(len(pkgmap)):
11288                 selected = pkgmap[x]["selected"]
11289                 if not selected:
11290                         continue
11291                 for mytype, mylist in pkgmap[x].iteritems():
11292                         if mytype == "selected":
11293                                 continue
11294                         mylist.difference_update(all_selected)
11295                 cp = portage.cpv_getkey(iter(selected).next())
11296                 for y in localtree.dep_match(cp):
11297                         if y not in pkgmap[x]["omitted"] and \
11298                                 y not in pkgmap[x]["selected"] and \
11299                                 y not in pkgmap[x]["protected"] and \
11300                                 y not in all_selected:
11301                                 pkgmap[x]["omitted"].add(y)
11302                 if global_unmerge and not pkgmap[x]["selected"]:
11303                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11304                         continue
11305                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11306                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11307                                 "'%s' is part of your system profile.\n" % cp),
11308                                 level=logging.WARNING, noiselevel=-1)
11309                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11310                                 "be damaging to your system.\n\n"),
11311                                 level=logging.WARNING, noiselevel=-1)
11312                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11313                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11314                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11315                 if not quiet:
11316                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11317                 else:
11318                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11319                 for mytype in ["selected","protected","omitted"]:
11320                         if not quiet:
11321                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11322                         if pkgmap[x][mytype]:
11323                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11324                                 sorted_pkgs.sort(portage.pkgcmp)
11325                                 for pn, ver, rev in sorted_pkgs:
11326                                         if rev == "r0":
11327                                                 myversion = ver
11328                                         else:
11329                                                 myversion = ver + "-" + rev
11330                                         if mytype == "selected":
11331                                                 writemsg_level(
11332                                                         colorize("UNMERGE_WARN", myversion + " "),
11333                                                         noiselevel=-1)
11334                                         else:
11335                                                 writemsg_level(
11336                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
11337                         else:
11338                                 writemsg_level("none ", noiselevel=-1)
11339                         if not quiet:
11340                                 writemsg_level("\n", noiselevel=-1)
11341                 if quiet:
11342                         writemsg_level("\n", noiselevel=-1)
11343
11344         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11345                 " packages are slated for removal.\n")
11346         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11347                         " and " + colorize("GOOD", "'omitted'") + \
11348                         " packages will not be removed.\n\n")
11349
11350         if "--pretend" in myopts:
11351                 #we're done... return
11352                 return 0
11353         if "--ask" in myopts:
11354                 if userquery("Would you like to unmerge these packages?")=="No":
11355                         # enter pretend mode for correct formatting of results
11356                         myopts["--pretend"] = True
11357                         print
11358                         print "Quitting."
11359                         print
11360                         return 0
11361         #the real unmerging begins, after a short delay....
11362         if clean_delay and not autoclean:
11363                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11364
11365         for x in xrange(len(pkgmap)):
11366                 for y in pkgmap[x]["selected"]:
11367                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11368                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11369                         mysplit = y.split("/")
11370                         #unmerge...
11371                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11372                                 mysettings, unmerge_action not in ["clean","prune"],
11373                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11374                                 scheduler=scheduler)
11375
11376                         if retval != os.EX_OK:
11377                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11378                                 if raise_on_error:
11379                                         raise UninstallFailure(retval)
11380                                 sys.exit(retval)
11381                         else:
11382                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
11383                                         sets["world"].cleanPackage(vartree.dbapi, y)
11384                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
11385         if clean_world and hasattr(sets["world"], "remove"):
11386                 for s in root_config.setconfig.active:
11387                         sets["world"].remove(SETPREFIX+s)
11388         return 1
11389
11390 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
11391
11392         if os.path.exists("/usr/bin/install-info"):
11393                 out = portage.output.EOutput()
11394                 regen_infodirs=[]
11395                 for z in infodirs:
11396                         if z=='':
11397                                 continue
11398                         inforoot=normpath(root+z)
11399                         if os.path.isdir(inforoot):
11400                                 infomtime = long(os.stat(inforoot).st_mtime)
11401                                 if inforoot not in prev_mtimes or \
11402                                         prev_mtimes[inforoot] != infomtime:
11403                                                 regen_infodirs.append(inforoot)
11404
11405                 if not regen_infodirs:
11406                         portage.writemsg_stdout("\n")
11407                         out.einfo("GNU info directory index is up-to-date.")
11408                 else:
11409                         portage.writemsg_stdout("\n")
11410                         out.einfo("Regenerating GNU info directory index...")
11411
11412                         dir_extensions = ("", ".gz", ".bz2")
11413                         icount=0
11414                         badcount=0
11415                         errmsg = ""
11416                         for inforoot in regen_infodirs:
11417                                 if inforoot=='':
11418                                         continue
11419
11420                                 if not os.path.isdir(inforoot) or \
11421                                         not os.access(inforoot, os.W_OK):
11422                                         continue
11423
11424                                 file_list = os.listdir(inforoot)
11425                                 file_list.sort()
11426                                 dir_file = os.path.join(inforoot, "dir")
11427                                 moved_old_dir = False
11428                                 processed_count = 0
11429                                 for x in file_list:
11430                                         if x.startswith(".") or \
11431                                                 os.path.isdir(os.path.join(inforoot, x)):
11432                                                 continue
11433                                         if x.startswith("dir"):
11434                                                 skip = False
11435                                                 for ext in dir_extensions:
11436                                                         if x == "dir" + ext or \
11437                                                                 x == "dir" + ext + ".old":
11438                                                                 skip = True
11439                                                                 break
11440                                                 if skip:
11441                                                         continue
11442                                         if processed_count == 0:
11443                                                 for ext in dir_extensions:
11444                                                         try:
11445                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
11446                                                                 moved_old_dir = True
11447                                                         except EnvironmentError, e:
11448                                                                 if e.errno != errno.ENOENT:
11449                                                                         raise
11450                                                                 del e
11451                                         processed_count += 1
11452                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11453                                         existsstr="already exists, for file `"
11454                                         if myso!="":
11455                                                 if re.search(existsstr,myso):
11456                                                         # Already exists... Don't increment the count for this.
11457                                                         pass
11458                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
11459                                                         # This info file doesn't contain a DIR-header: install-info produces this
11460                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
11461                                                         # Don't increment the count for this.
11462                                                         pass
11463                                                 else:
11464                                                         badcount=badcount+1
11465                                                         errmsg += myso + "\n"
11466                                         icount=icount+1
11467
11468                                 if moved_old_dir and not os.path.exists(dir_file):
11469                                         # We didn't generate a new dir file, so put the old file
11470                                         # back where it was originally found.
11471                                         for ext in dir_extensions:
11472                                                 try:
11473                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
11474                                                 except EnvironmentError, e:
11475                                                         if e.errno != errno.ENOENT:
11476                                                                 raise
11477                                                         del e
11478
11479                                 # Clean dir.old cruft so that they don't prevent
11480                                 # unmerge of otherwise empty directories.
11481                                 for ext in dir_extensions:
11482                                         try:
11483                                                 os.unlink(dir_file + ext + ".old")
11484                                         except EnvironmentError, e:
11485                                                 if e.errno != errno.ENOENT:
11486                                                         raise
11487                                                 del e
11488
11489                                 #update mtime so we can potentially avoid regenerating.
11490                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11491
11492                         if badcount:
11493                                 out.eerror("Processed %d info files; %d errors." % \
11494                                         (icount, badcount))
11495                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11496                         else:
11497                                 if icount > 0:
11498                                         out.einfo("Processed %d info files." % (icount,))
11499
11500
11501 def display_news_notification(root_config, myopts):
11502         target_root = root_config.root
11503         trees = root_config.trees
11504         settings = trees["vartree"].settings
11505         portdb = trees["porttree"].dbapi
11506         vardb = trees["vartree"].dbapi
11507         NEWS_PATH = os.path.join("metadata", "news")
11508         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11509         newsReaderDisplay = False
11510         update = "--pretend" not in myopts
11511
11512         for repo in portdb.getRepositories():
11513                 unreadItems = checkUpdatedNewsItems(
11514                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11515                 if unreadItems:
11516                         if not newsReaderDisplay:
11517                                 newsReaderDisplay = True
11518                                 print
11519                         print colorize("WARN", " * IMPORTANT:"),
11520                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11521                         
11522         
11523         if newsReaderDisplay:
11524                 print colorize("WARN", " *"),
11525                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
11526                 print
11527
11528 def display_preserved_libs(vardbapi):
11529         MAX_DISPLAY = 3
11530
11531         # Ensure the registry is consistent with existing files.
11532         vardbapi.plib_registry.pruneNonExisting()
11533
11534         if vardbapi.plib_registry.hasEntries():
11535                 print
11536                 print colorize("WARN", "!!!") + " existing preserved libs:"
11537                 plibdata = vardbapi.plib_registry.getPreservedLibs()
11538                 linkmap = vardbapi.linkmap
11539                 consumer_map = {}
11540                 owners = {}
11541                 linkmap_broken = False
11542
11543                 try:
11544                         linkmap.rebuild()
11545                 except portage.exception.CommandNotFound, e:
11546                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
11547                                 level=logging.ERROR, noiselevel=-1)
11548                         del e
11549                         linkmap_broken = True
11550                 else:
11551                         search_for_owners = set()
11552                         for cpv in plibdata:
11553                                 internal_plib_keys = set(linkmap._obj_key(f) \
11554                                         for f in plibdata[cpv])
11555                                 for f in plibdata[cpv]:
11556                                         if f in consumer_map:
11557                                                 continue
11558                                         consumers = []
11559                                         for c in linkmap.findConsumers(f):
11560                                                 # Filter out any consumers that are also preserved libs
11561                                                 # belonging to the same package as the provider.
11562                                                 if linkmap._obj_key(c) not in internal_plib_keys:
11563                                                         consumers.append(c)
11564                                         consumers.sort()
11565                                         consumer_map[f] = consumers
11566                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
11567
11568                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
11569
11570                 for cpv in plibdata:
11571                         print colorize("WARN", ">>>") + " package: %s" % cpv
11572                         samefile_map = {}
11573                         for f in plibdata[cpv]:
11574                                 obj_key = linkmap._obj_key(f)
11575                                 alt_paths = samefile_map.get(obj_key)
11576                                 if alt_paths is None:
11577                                         alt_paths = set()
11578                                         samefile_map[obj_key] = alt_paths
11579                                 alt_paths.add(f)
11580
11581                         for alt_paths in samefile_map.itervalues():
11582                                 alt_paths = sorted(alt_paths)
11583                                 for p in alt_paths:
11584                                         print colorize("WARN", " * ") + " - %s" % (p,)
11585                                 f = alt_paths[0]
11586                                 consumers = consumer_map.get(f, [])
11587                                 for c in consumers[:MAX_DISPLAY]:
11588                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11589                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
11590                                 if len(consumers) == MAX_DISPLAY + 1:
11591                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11592                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11593                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
11594                                 elif len(consumers) > MAX_DISPLAY:
11595                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
11596                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11597
11598
11599 def _flush_elog_mod_echo():
11600         """
11601         Dump the mod_echo output now so that our other
11602         notifications are shown last.
11603         @rtype: bool
11604         @returns: True if messages were shown, False otherwise.
11605         """
11606         messages_shown = False
11607         try:
11608                 from portage.elog import mod_echo
11609         except ImportError:
11610                 pass # happens during downgrade to a version without the module
11611         else:
11612                 messages_shown = bool(mod_echo._items)
11613                 mod_echo.finalize()
11614         return messages_shown
11615
11616 def post_emerge(root_config, myopts, mtimedb, retval):
11617         """
11618         Misc. things to run at the end of a merge session.
11619         
11620         Update Info Files
11621         Update Config Files
11622         Update News Items
11623         Commit mtimeDB
11624         Display preserved libs warnings
11625         Exit Emerge
11626
11627         @param trees: A dictionary mapping each ROOT to it's package databases
11628         @type trees: dict
11629         @param mtimedb: The mtimeDB to store data needed across merge invocations
11630         @type mtimedb: MtimeDB class instance
11631         @param retval: Emerge's return value
11632         @type retval: Int
11633         @rype: None
11634         @returns:
11635         1.  Calls sys.exit(retval)
11636         """
11637
11638         target_root = root_config.root
11639         trees = { target_root : root_config.trees }
11640         vardbapi = trees[target_root]["vartree"].dbapi
11641         settings = vardbapi.settings
11642         info_mtimes = mtimedb["info"]
11643
11644         # Load the most current variables from ${ROOT}/etc/profile.env
11645         settings.unlock()
11646         settings.reload()
11647         settings.regenerate()
11648         settings.lock()
11649
11650         config_protect = settings.get("CONFIG_PROTECT","").split()
11651         infodirs = settings.get("INFOPATH","").split(":") + \
11652                 settings.get("INFODIR","").split(":")
11653
11654         os.chdir("/")
11655
11656         if retval == os.EX_OK:
11657                 exit_msg = " *** exiting successfully."
11658         else:
11659                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11660         emergelog("notitles" not in settings.features, exit_msg)
11661
11662         _flush_elog_mod_echo()
11663
11664         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11665         if counter_hash is not None and \
11666                 counter_hash == vardbapi._counter_hash():
11667                 display_news_notification(root_config, myopts)
11668                 # If vdb state has not changed then there's nothing else to do.
11669                 sys.exit(retval)
11670
11671         vdb_path = os.path.join(target_root, portage.VDB_PATH)
11672         portage.util.ensure_dirs(vdb_path)
11673         vdb_lock = None
11674         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11675                 vdb_lock = portage.locks.lockdir(vdb_path)
11676
11677         if vdb_lock:
11678                 try:
11679                         if "noinfo" not in settings.features:
11680                                 chk_updated_info_files(target_root,
11681                                         infodirs, info_mtimes, retval)
11682                         mtimedb.commit()
11683                 finally:
11684                         if vdb_lock:
11685                                 portage.locks.unlockdir(vdb_lock)
11686
11687         chk_updated_cfg_files(target_root, config_protect)
11688         
11689         display_news_notification(root_config, myopts)
11690         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11691                 display_preserved_libs(vardbapi)        
11692
11693         sys.exit(retval)
11694
11695
11696 def chk_updated_cfg_files(target_root, config_protect):
11697         if config_protect:
11698                 #number of directories with some protect files in them
11699                 procount=0
11700                 for x in config_protect:
11701                         x = os.path.join(target_root, x.lstrip(os.path.sep))
11702                         if not os.access(x, os.W_OK):
11703                                 # Avoid Permission denied errors generated
11704                                 # later by `find`.
11705                                 continue
11706                         try:
11707                                 mymode = os.lstat(x).st_mode
11708                         except OSError:
11709                                 continue
11710                         if stat.S_ISLNK(mymode):
11711                                 # We want to treat it like a directory if it
11712                                 # is a symlink to an existing directory.
11713                                 try:
11714                                         real_mode = os.stat(x).st_mode
11715                                         if stat.S_ISDIR(real_mode):
11716                                                 mymode = real_mode
11717                                 except OSError:
11718                                         pass
11719                         if stat.S_ISDIR(mymode):
11720                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11721                         else:
11722                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11723                                         os.path.split(x.rstrip(os.path.sep))
11724                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11725                         a = commands.getstatusoutput(mycommand)
11726                         if a[0] != 0:
11727                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11728                                 sys.stderr.flush()
11729                                 # Show the error message alone, sending stdout to /dev/null.
11730                                 os.system(mycommand + " 1>/dev/null")
11731                         else:
11732                                 files = a[1].split('\0')
11733                                 # split always produces an empty string as the last element
11734                                 if files and not files[-1]:
11735                                         del files[-1]
11736                                 if files:
11737                                         procount += 1
11738                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
11739                                         if stat.S_ISDIR(mymode):
11740                                                  print "%d config files in '%s' need updating." % \
11741                                                         (len(files), x)
11742                                         else:
11743                                                  print "config file '%s' needs updating." % x
11744
11745                 if procount:
11746                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11747                                 " section of the " + bold("emerge")
11748                         print " "+yellow("*")+" man page to learn how to update config files."
11749
11750 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
11751         update=False):
11752         """
11753         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
11754         Returns the number of unread (yet relevent) items.
11755         
11756         @param portdb: a portage tree database
11757         @type portdb: pordbapi
11758         @param vardb: an installed package database
11759         @type vardb: vardbapi
11760         @param NEWS_PATH:
11761         @type NEWS_PATH:
11762         @param UNREAD_PATH:
11763         @type UNREAD_PATH:
11764         @param repo_id:
11765         @type repo_id:
11766         @rtype: Integer
11767         @returns:
11768         1.  The number of unread but relevant news items.
11769         
11770         """
11771         from portage.news import NewsManager
11772         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11773         return manager.getUnreadItems( repo_id, update=update )
11774
11775 def insert_category_into_atom(atom, category):
11776         alphanum = re.search(r'\w', atom)
11777         if alphanum:
11778                 ret = atom[:alphanum.start()] + "%s/" % category + \
11779                         atom[alphanum.start():]
11780         else:
11781                 ret = None
11782         return ret
11783
11784 def is_valid_package_atom(x):
11785         if "/" not in x:
11786                 alphanum = re.search(r'\w', x)
11787                 if alphanum:
11788                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11789         return portage.isvalidatom(x)
11790
11791 def show_blocker_docs_link():
11792         print
11793         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11794         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11795         print
11796         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11797         print
11798
11799 def show_mask_docs():
11800         print "For more information, see the MASKED PACKAGES section in the emerge"
11801         print "man page or refer to the Gentoo Handbook."
11802
11803 def action_sync(settings, trees, mtimedb, myopts, myaction):
11804         xterm_titles = "notitles" not in settings.features
11805         emergelog(xterm_titles, " === sync")
11806         myportdir = settings.get("PORTDIR", None)
11807         out = portage.output.EOutput()
11808         if not myportdir:
11809                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
11810                 sys.exit(1)
11811         if myportdir[-1]=="/":
11812                 myportdir=myportdir[:-1]
11813         try:
11814                 st = os.stat(myportdir)
11815         except OSError:
11816                 st = None
11817         if st is None:
11818                 print ">>>",myportdir,"not found, creating it."
11819                 os.makedirs(myportdir,0755)
11820                 st = os.stat(myportdir)
11821
11822         spawn_kwargs = {}
11823         spawn_kwargs["env"] = settings.environ()
11824         if portage.data.secpass >= 2 and \
11825                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
11826                 st.st_gid != os.getgid() and st.st_mode & 0070):
11827                 try:
11828                         homedir = pwd.getpwuid(st.st_uid).pw_dir
11829                 except KeyError:
11830                         pass
11831                 else:
11832                         # Drop privileges when syncing, in order to match
11833                         # existing uid/gid settings.
11834                         spawn_kwargs["uid"]    = st.st_uid
11835                         spawn_kwargs["gid"]    = st.st_gid
11836                         spawn_kwargs["groups"] = [st.st_gid]
11837                         spawn_kwargs["env"]["HOME"] = homedir
11838                         umask = 0002
11839                         if not st.st_mode & 0020:
11840                                 umask = umask | 0020
11841                         spawn_kwargs["umask"] = umask
11842
11843         syncuri = settings.get("SYNC", "").strip()
11844         if not syncuri:
11845                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11846                         noiselevel=-1, level=logging.ERROR)
11847                 return 1
11848
11849         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
11850         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
11851
11852         os.umask(0022)
11853         dosyncuri = syncuri
11854         updatecache_flg = False
11855         if myaction == "metadata":
11856                 print "skipping sync"
11857                 updatecache_flg = True
11858         elif ".git" in vcs_dirs:
11859                 # Update existing git repository, and ignore the syncuri. We are
11860                 # going to trust the user and assume that the user is in the branch
11861                 # that he/she wants updated. We'll let the user manage branches with
11862                 # git directly.
11863                 msg = ">>> Starting git pull in %s..." % myportdir
11864                 emergelog(xterm_titles, msg )
11865                 writemsg_level(msg + "\n")
11866                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
11867                         (portage._shell_quote(myportdir),), **spawn_kwargs)
11868                 if exitcode != os.EX_OK:
11869                         msg = "!!! git pull error in %s." % myportdir
11870                         emergelog(xterm_titles, msg)
11871                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
11872                         return exitcode
11873                 msg = ">>> Git pull in %s successful" % myportdir
11874                 emergelog(xterm_titles, msg)
11875                 writemsg_level(msg + "\n")
11876                 exitcode = git_sync_timestamps(settings, myportdir)
11877                 if exitcode == os.EX_OK:
11878                         updatecache_flg = True
11879         elif syncuri[:8]=="rsync://":
11880                 for vcs_dir in vcs_dirs:
11881                         writemsg_level(("!!! %s appears to be under revision " + \
11882                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
11883                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
11884                         return 1
11885                 if not os.path.exists("/usr/bin/rsync"):
11886                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11887                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11888                         sys.exit(1)
11889                 mytimeout=180
11890
11891                 rsync_opts = []
11892                 import shlex, StringIO
11893                 if settings["PORTAGE_RSYNC_OPTS"] == "":
11894                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11895                         rsync_opts.extend([
11896                                 "--recursive",    # Recurse directories
11897                                 "--links",        # Consider symlinks
11898                                 "--safe-links",   # Ignore links outside of tree
11899                                 "--perms",        # Preserve permissions
11900                                 "--times",        # Preserive mod times
11901                                 "--compress",     # Compress the data transmitted
11902                                 "--force",        # Force deletion on non-empty dirs
11903                                 "--whole-file",   # Don't do block transfers, only entire files
11904                                 "--delete",       # Delete files that aren't in the master tree
11905                                 "--stats",        # Show final statistics about what was transfered
11906                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11907                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
11908                                 "--exclude=/local",       # Exclude local     from consideration
11909                                 "--exclude=/packages",    # Exclude packages  from consideration
11910                         ])
11911
11912                 else:
11913                         # The below validation is not needed when using the above hardcoded
11914                         # defaults.
11915
11916                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11917                         lexer = shlex.shlex(StringIO.StringIO(
11918                                 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11919                         lexer.whitespace_split = True
11920                         rsync_opts.extend(lexer)
11921                         del lexer
11922
11923                         for opt in ("--recursive", "--times"):
11924                                 if opt not in rsync_opts:
11925                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
11926                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11927                                         rsync_opts.append(opt)
11928         
11929                         for exclude in ("distfiles", "local", "packages"):
11930                                 opt = "--exclude=/%s" % exclude
11931                                 if opt not in rsync_opts:
11932                                         portage.writemsg(yellow("WARNING:") + \
11933                                         " adding required option %s not included in "  % opt + \
11934                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11935                                         rsync_opts.append(opt)
11936         
11937                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11938                                 def rsync_opt_startswith(opt_prefix):
11939                                         for x in rsync_opts:
11940                                                 if x.startswith(opt_prefix):
11941                                                         return True
11942                                         return False
11943
11944                                 if not rsync_opt_startswith("--timeout="):
11945                                         rsync_opts.append("--timeout=%d" % mytimeout)
11946
11947                                 for opt in ("--compress", "--whole-file"):
11948                                         if opt not in rsync_opts:
11949                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11950                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11951                                                 rsync_opts.append(opt)
11952
11953                 if "--quiet" in myopts:
11954                         rsync_opts.append("--quiet")    # Shut up a lot
11955                 else:
11956                         rsync_opts.append("--verbose")  # Print filelist
11957
11958                 if "--verbose" in myopts:
11959                         rsync_opts.append("--progress")  # Progress meter for each file
11960
11961                 if "--debug" in myopts:
11962                         rsync_opts.append("--checksum") # Force checksum on all files
11963
11964                 # Real local timestamp file.
11965                 servertimestampfile = os.path.join(
11966                         myportdir, "metadata", "timestamp.chk")
11967
11968                 content = portage.util.grabfile(servertimestampfile)
11969                 mytimestamp = 0
11970                 if content:
11971                         try:
11972                                 mytimestamp = time.mktime(time.strptime(content[0],
11973                                         "%a, %d %b %Y %H:%M:%S +0000"))
11974                         except (OverflowError, ValueError):
11975                                 pass
11976                 del content
11977
11978                 try:
11979                         rsync_initial_timeout = \
11980                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11981                 except ValueError:
11982                         rsync_initial_timeout = 15
11983
11984                 try:
11985                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11986                 except SystemExit, e:
11987                         raise # Needed else can't exit
11988                 except:
11989                         maxretries=3 #default number of retries
11990
11991                 retries=0
11992                 user_name, hostname, port = re.split(
11993                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11994                 if port is None:
11995                         port=""
11996                 if user_name is None:
11997                         user_name=""
11998                 updatecache_flg=True
11999                 all_rsync_opts = set(rsync_opts)
12000                 lexer = shlex.shlex(StringIO.StringIO(
12001                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
12002                 lexer.whitespace_split = True
12003                 extra_rsync_opts = list(lexer)
12004                 del lexer
12005                 all_rsync_opts.update(extra_rsync_opts)
12006                 family = socket.AF_INET
12007                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12008                         family = socket.AF_INET
12009                 elif socket.has_ipv6 and \
12010                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12011                         family = socket.AF_INET6
12012                 ips=[]
12013                 SERVER_OUT_OF_DATE = -1
12014                 EXCEEDED_MAX_RETRIES = -2
12015                 while (1):
12016                         if ips:
12017                                 del ips[0]
12018                         if ips==[]:
12019                                 try:
12020                                         for addrinfo in socket.getaddrinfo(
12021                                                 hostname, None, family, socket.SOCK_STREAM):
12022                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12023                                                         # IPv6 addresses need to be enclosed in square brackets
12024                                                         ips.append("[%s]" % addrinfo[4][0])
12025                                                 else:
12026                                                         ips.append(addrinfo[4][0])
12027                                         from random import shuffle
12028                                         shuffle(ips)
12029                                 except SystemExit, e:
12030                                         raise # Needed else can't exit
12031                                 except Exception, e:
12032                                         print "Notice:",str(e)
12033                                         dosyncuri=syncuri
12034
12035                         if ips:
12036                                 try:
12037                                         dosyncuri = syncuri.replace(
12038                                                 "//" + user_name + hostname + port + "/",
12039                                                 "//" + user_name + ips[0] + port + "/", 1)
12040                                 except SystemExit, e:
12041                                         raise # Needed else can't exit
12042                                 except Exception, e:
12043                                         print "Notice:",str(e)
12044                                         dosyncuri=syncuri
12045
12046                         if (retries==0):
12047                                 if "--ask" in myopts:
12048                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12049                                                 print
12050                                                 print "Quitting."
12051                                                 print
12052                                                 sys.exit(0)
12053                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12054                                 if "--quiet" not in myopts:
12055                                         print ">>> Starting rsync with "+dosyncuri+"..."
12056                         else:
12057                                 emergelog(xterm_titles,
12058                                         ">>> Starting retry %d of %d with %s" % \
12059                                                 (retries,maxretries,dosyncuri))
12060                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12061
12062                         if mytimestamp != 0 and "--quiet" not in myopts:
12063                                 print ">>> Checking server timestamp ..."
12064
12065                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12066
12067                         if "--debug" in myopts:
12068                                 print rsynccommand
12069
12070                         exitcode = os.EX_OK
12071                         servertimestamp = 0
12072                         # Even if there's no timestamp available locally, fetch the
12073                         # timestamp anyway as an initial probe to verify that the server is
12074                         # responsive.  This protects us from hanging indefinitely on a
12075                         # connection attempt to an unresponsive server which rsync's
12076                         # --timeout option does not prevent.
12077                         if True:
12078                                 # Temporary file for remote server timestamp comparison.
12079                                 from tempfile import mkstemp
12080                                 fd, tmpservertimestampfile = mkstemp()
12081                                 os.close(fd)
12082                                 mycommand = rsynccommand[:]
12083                                 mycommand.append(dosyncuri.rstrip("/") + \
12084                                         "/metadata/timestamp.chk")
12085                                 mycommand.append(tmpservertimestampfile)
12086                                 content = None
12087                                 mypids = []
12088                                 try:
12089                                         def timeout_handler(signum, frame):
12090                                                 raise portage.exception.PortageException("timed out")
12091                                         signal.signal(signal.SIGALRM, timeout_handler)
12092                                         # Timeout here in case the server is unresponsive.  The
12093                                         # --timeout rsync option doesn't apply to the initial
12094                                         # connection attempt.
12095                                         if rsync_initial_timeout:
12096                                                 signal.alarm(rsync_initial_timeout)
12097                                         try:
12098                                                 mypids.extend(portage.process.spawn(
12099                                                         mycommand, env=settings.environ(), returnpid=True))
12100                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12101                                                 content = portage.grabfile(tmpservertimestampfile)
12102                                         finally:
12103                                                 if rsync_initial_timeout:
12104                                                         signal.alarm(0)
12105                                                 try:
12106                                                         os.unlink(tmpservertimestampfile)
12107                                                 except OSError:
12108                                                         pass
12109                                 except portage.exception.PortageException, e:
12110                                         # timed out
12111                                         print e
12112                                         del e
12113                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12114                                                 os.kill(mypids[0], signal.SIGTERM)
12115                                                 os.waitpid(mypids[0], 0)
12116                                         # This is the same code rsync uses for timeout.
12117                                         exitcode = 30
12118                                 else:
12119                                         if exitcode != os.EX_OK:
12120                                                 if exitcode & 0xff:
12121                                                         exitcode = (exitcode & 0xff) << 8
12122                                                 else:
12123                                                         exitcode = exitcode >> 8
12124                                 if mypids:
12125                                         portage.process.spawned_pids.remove(mypids[0])
12126                                 if content:
12127                                         try:
12128                                                 servertimestamp = time.mktime(time.strptime(
12129                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12130                                         except (OverflowError, ValueError):
12131                                                 pass
12132                                 del mycommand, mypids, content
12133                         if exitcode == os.EX_OK:
12134                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12135                                         emergelog(xterm_titles,
12136                                                 ">>> Cancelling sync -- Already current.")
12137                                         print
12138                                         print ">>>"
12139                                         print ">>> Timestamps on the server and in the local repository are the same."
12140                                         print ">>> Cancelling all further sync action. You are already up to date."
12141                                         print ">>>"
12142                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12143                                         print ">>>"
12144                                         print
12145                                         sys.exit(0)
12146                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12147                                         emergelog(xterm_titles,
12148                                                 ">>> Server out of date: %s" % dosyncuri)
12149                                         print
12150                                         print ">>>"
12151                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12152                                         print ">>>"
12153                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12154                                         print ">>>"
12155                                         print
12156                                         exitcode = SERVER_OUT_OF_DATE
12157                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12158                                         # actual sync
12159                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12160                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12161                                         if exitcode in [0,1,3,4,11,14,20,21]:
12162                                                 break
12163                         elif exitcode in [1,3,4,11,14,20,21]:
12164                                 break
12165                         else:
12166                                 # Code 2 indicates protocol incompatibility, which is expected
12167                                 # for servers with protocol < 29 that don't support
12168                                 # --prune-empty-directories.  Retry for a server that supports
12169                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12170                                 pass
12171
12172                         retries=retries+1
12173
12174                         if retries<=maxretries:
12175                                 print ">>> Retrying..."
12176                                 time.sleep(11)
12177                         else:
12178                                 # over retries
12179                                 # exit loop
12180                                 updatecache_flg=False
12181                                 exitcode = EXCEEDED_MAX_RETRIES
12182                                 break
12183
12184                 if (exitcode==0):
12185                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12186                 elif exitcode == SERVER_OUT_OF_DATE:
12187                         sys.exit(1)
12188                 elif exitcode == EXCEEDED_MAX_RETRIES:
12189                         sys.stderr.write(
12190                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12191                         sys.exit(1)
12192                 elif (exitcode>0):
12193                         msg = []
12194                         if exitcode==1:
12195                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12196                                 msg.append("that your SYNC statement is proper.")
12197                                 msg.append("SYNC=" + settings["SYNC"])
12198                         elif exitcode==11:
12199                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12200                                 msg.append("this means your disk is full, but can be caused by corruption")
12201                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12202                                 msg.append("and try again after the problem has been fixed.")
12203                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12204                         elif exitcode==20:
12205                                 msg.append("Rsync was killed before it finished.")
12206                         else:
12207                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12208                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12209                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12210                                 msg.append("temporary problem unless complications exist with your network")
12211                                 msg.append("(and possibly your system's filesystem) configuration.")
12212                         for line in msg:
12213                                 out.eerror(line)
12214                         sys.exit(exitcode)
12215         elif syncuri[:6]=="cvs://":
12216                 if not os.path.exists("/usr/bin/cvs"):
12217                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12218                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12219                         sys.exit(1)
12220                 cvsroot=syncuri[6:]
12221                 cvsdir=os.path.dirname(myportdir)
12222                 if not os.path.exists(myportdir+"/CVS"):
12223                         #initial checkout
12224                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12225                         if os.path.exists(cvsdir+"/gentoo-x86"):
12226                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12227                                 sys.exit(1)
12228                         try:
12229                                 os.rmdir(myportdir)
12230                         except OSError, e:
12231                                 if e.errno != errno.ENOENT:
12232                                         sys.stderr.write(
12233                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12234                                         sys.exit(1)
12235                                 del e
12236                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12237                                 print "!!! cvs checkout error; exiting."
12238                                 sys.exit(1)
12239                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12240                 else:
12241                         #cvs update
12242                         print ">>> Starting cvs update with "+syncuri+"..."
12243                         retval = portage.process.spawn_bash(
12244                                 "cd %s; cvs -z0 -q update -dP" % \
12245                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12246                         if retval != os.EX_OK:
12247                                 sys.exit(retval)
12248                 dosyncuri = syncuri
12249         else:
12250                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12251                         noiselevel=-1, level=logging.ERROR)
12252                 return 1
12253
12254         if updatecache_flg and  \
12255                 myaction != "metadata" and \
12256                 "metadata-transfer" not in settings.features:
12257                 updatecache_flg = False
12258
12259         # Reload the whole config from scratch.
12260         settings, trees, mtimedb = load_emerge_config(trees=trees)
12261         root_config = trees[settings["ROOT"]]["root_config"]
12262         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12263
12264         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12265                 action_metadata(settings, portdb, myopts)
12266
12267         if portage._global_updates(trees, mtimedb["updates"]):
12268                 mtimedb.commit()
12269                 # Reload the whole config from scratch.
12270                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12271                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12272                 root_config = trees[settings["ROOT"]]["root_config"]
12273
12274         mybestpv = portdb.xmatch("bestmatch-visible",
12275                 portage.const.PORTAGE_PACKAGE_ATOM)
12276         mypvs = portage.best(
12277                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12278                 portage.const.PORTAGE_PACKAGE_ATOM))
12279
12280         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12281
12282         if myaction != "metadata":
12283                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12284                         retval = portage.process.spawn(
12285                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12286                                 dosyncuri], env=settings.environ())
12287                         if retval != os.EX_OK:
12288                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12289
12290         if(mybestpv != mypvs) and not "--quiet" in myopts:
12291                 print
12292                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12293                 print red(" * ")+"that you update portage now, before any other packages are updated."
12294                 print
12295                 print red(" * ")+"To update portage, run 'emerge portage' now."
12296                 print
12297         
12298         display_news_notification(root_config, myopts)
12299         return os.EX_OK
12300
12301 def git_sync_timestamps(settings, portdir):
12302         """
12303         Since git doesn't preserve timestamps, synchronize timestamps between
12304         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12305         for a given file as long as the file in the working tree is not modified
12306         (relative to HEAD).
12307         """
12308         cache_dir = os.path.join(portdir, "metadata", "cache")
12309         if not os.path.isdir(cache_dir):
12310                 return os.EX_OK
12311         writemsg_level(">>> Synchronizing timestamps...\n")
12312
12313         from portage.cache.cache_errors import CacheError
12314         try:
12315                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12316                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12317         except CacheError, e:
12318                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12319                         level=logging.ERROR, noiselevel=-1)
12320                 return 1
12321
12322         ec_dir = os.path.join(portdir, "eclass")
12323         try:
12324                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12325                         if f.endswith(".eclass"))
12326         except OSError, e:
12327                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12328                         level=logging.ERROR, noiselevel=-1)
12329                 return 1
12330
12331         args = [portage.const.BASH_BINARY, "-c",
12332                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12333                 portage._shell_quote(portdir)]
12334         import subprocess
12335         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12336         modified_files = set(l.rstrip("\n") for l in proc.stdout)
12337         rval = proc.wait()
12338         if rval != os.EX_OK:
12339                 return rval
12340
12341         modified_eclasses = set(ec for ec in ec_names \
12342                 if os.path.join("eclass", ec + ".eclass") in modified_files)
12343
12344         updated_ec_mtimes = {}
12345
12346         for cpv in cache_db:
12347                 cpv_split = portage.catpkgsplit(cpv)
12348                 if cpv_split is None:
12349                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12350                                 level=logging.ERROR, noiselevel=-1)
12351                         continue
12352
12353                 cat, pn, ver, rev = cpv_split
12354                 cat, pf = portage.catsplit(cpv)
12355                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
12356                 if relative_eb_path in modified_files:
12357                         continue
12358
12359                 try:
12360                         cache_entry = cache_db[cpv]
12361                         eb_mtime = cache_entry.get("_mtime_")
12362                         ec_mtimes = cache_entry.get("_eclasses_")
12363                 except KeyError:
12364                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12365                                 level=logging.ERROR, noiselevel=-1)
12366                         continue
12367                 except CacheError, e:
12368                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12369                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
12370                         continue
12371
12372                 if eb_mtime is None:
12373                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12374                                 level=logging.ERROR, noiselevel=-1)
12375                         continue
12376
12377                 try:
12378                         eb_mtime = long(eb_mtime)
12379                 except ValueError:
12380                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12381                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12382                         continue
12383
12384                 if ec_mtimes is None:
12385                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
12386                                 level=logging.ERROR, noiselevel=-1)
12387                         continue
12388
12389                 if modified_eclasses.intersection(ec_mtimes):
12390                         continue
12391
12392                 missing_eclasses = set(ec_mtimes).difference(ec_names)
12393                 if missing_eclasses:
12394                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
12395                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
12396                                 noiselevel=-1)
12397                         continue
12398
12399                 eb_path = os.path.join(portdir, relative_eb_path)
12400                 try:
12401                         current_eb_mtime = os.stat(eb_path)
12402                 except OSError:
12403                         writemsg_level("!!! Missing ebuild: %s\n" % \
12404                                 (cpv,), level=logging.ERROR, noiselevel=-1)
12405                         continue
12406
12407                 inconsistent = False
12408                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12409                         updated_mtime = updated_ec_mtimes.get(ec)
12410                         if updated_mtime is not None and updated_mtime != ec_mtime:
12411                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
12412                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
12413                                 inconsistent = True
12414                                 break
12415
12416                 if inconsistent:
12417                         continue
12418
12419                 if current_eb_mtime != eb_mtime:
12420                         os.utime(eb_path, (eb_mtime, eb_mtime))
12421
12422                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12423                         if ec in updated_ec_mtimes:
12424                                 continue
12425                         ec_path = os.path.join(ec_dir, ec + ".eclass")
12426                         current_mtime = long(os.stat(ec_path).st_mtime)
12427                         if current_mtime != ec_mtime:
12428                                 os.utime(ec_path, (ec_mtime, ec_mtime))
12429                         updated_ec_mtimes[ec] = ec_mtime
12430
12431         return os.EX_OK
12432
12433 def action_metadata(settings, portdb, myopts):
12434         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
12435         old_umask = os.umask(0002)
12436         cachedir = os.path.normpath(settings.depcachedir)
12437         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
12438                                         "/lib", "/opt", "/proc", "/root", "/sbin",
12439                                         "/sys", "/tmp", "/usr",  "/var"]:
12440                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12441                         "ROOT DIRECTORY ON YOUR SYSTEM."
12442                 print >> sys.stderr, \
12443                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12444                 sys.exit(73)
12445         if not os.path.exists(cachedir):
12446                 os.mkdir(cachedir)
12447
12448         ec = portage.eclass_cache.cache(portdb.porttree_root)
12449         myportdir = os.path.realpath(settings["PORTDIR"])
12450         cm = settings.load_best_module("portdbapi.metadbmodule")(
12451                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12452
12453         from portage.cache import util
12454
12455         class percentage_noise_maker(util.quiet_mirroring):
12456                 def __init__(self, dbapi):
12457                         self.dbapi = dbapi
12458                         self.cp_all = dbapi.cp_all()
12459                         l = len(self.cp_all)
12460                         self.call_update_min = 100000000
12461                         self.min_cp_all = l/100.0
12462                         self.count = 1
12463                         self.pstr = ''
12464
12465                 def __iter__(self):
12466                         for x in self.cp_all:
12467                                 self.count += 1
12468                                 if self.count > self.min_cp_all:
12469                                         self.call_update_min = 0
12470                                         self.count = 0
12471                                 for y in self.dbapi.cp_list(x):
12472                                         yield y
12473                         self.call_update_mine = 0
12474
12475                 def update(self, *arg):
12476                         try:                            self.pstr = int(self.pstr) + 1
12477                         except ValueError:      self.pstr = 1
12478                         sys.stdout.write("%s%i%%" % \
12479                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
12480                         sys.stdout.flush()
12481                         self.call_update_min = 10000000
12482
12483                 def finish(self, *arg):
12484                         sys.stdout.write("\b\b\b\b100%\n")
12485                         sys.stdout.flush()
12486
12487         if "--quiet" in myopts:
12488                 def quicky_cpv_generator(cp_all_list):
12489                         for x in cp_all_list:
12490                                 for y in portdb.cp_list(x):
12491                                         yield y
12492                 source = quicky_cpv_generator(portdb.cp_all())
12493                 noise_maker = portage.cache.util.quiet_mirroring()
12494         else:
12495                 noise_maker = source = percentage_noise_maker(portdb)
12496         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12497                 eclass_cache=ec, verbose_instance=noise_maker)
12498
12499         sys.stdout.flush()
12500         os.umask(old_umask)
12501
12502 def action_regen(settings, portdb, max_jobs, max_load):
12503         xterm_titles = "notitles" not in settings.features
12504         emergelog(xterm_titles, " === regen")
12505         #regenerate cache entries
12506         portage.writemsg_stdout("Regenerating cache entries...\n")
12507         try:
12508                 os.close(sys.stdin.fileno())
12509         except SystemExit, e:
12510                 raise # Needed else can't exit
12511         except:
12512                 pass
12513         sys.stdout.flush()
12514
12515         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12516         regen.run()
12517
12518         portage.writemsg_stdout("done!\n")
12519
12520 def action_config(settings, trees, myopts, myfiles):
12521         if len(myfiles) != 1:
12522                 print red("!!! config can only take a single package atom at this time\n")
12523                 sys.exit(1)
12524         if not is_valid_package_atom(myfiles[0]):
12525                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12526                         noiselevel=-1)
12527                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12528                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12529                 sys.exit(1)
12530         print
12531         try:
12532                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12533         except portage.exception.AmbiguousPackageName, e:
12534                 # Multiple matches thrown from cpv_expand
12535                 pkgs = e.args[0]
12536         if len(pkgs) == 0:
12537                 print "No packages found.\n"
12538                 sys.exit(0)
12539         elif len(pkgs) > 1:
12540                 if "--ask" in myopts:
12541                         options = []
12542                         print "Please select a package to configure:"
12543                         idx = 0
12544                         for pkg in pkgs:
12545                                 idx += 1
12546                                 options.append(str(idx))
12547                                 print options[-1]+") "+pkg
12548                         print "X) Cancel"
12549                         options.append("X")
12550                         idx = userquery("Selection?", options)
12551                         if idx == "X":
12552                                 sys.exit(0)
12553                         pkg = pkgs[int(idx)-1]
12554                 else:
12555                         print "The following packages available:"
12556                         for pkg in pkgs:
12557                                 print "* "+pkg
12558                         print "\nPlease use a specific atom or the --ask option."
12559                         sys.exit(1)
12560         else:
12561                 pkg = pkgs[0]
12562
12563         print
12564         if "--ask" in myopts:
12565                 if userquery("Ready to configure "+pkg+"?") == "No":
12566                         sys.exit(0)
12567         else:
12568                 print "Configuring pkg..."
12569         print
12570         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12571         mysettings = portage.config(clone=settings)
12572         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12573         debug = mysettings.get("PORTAGE_DEBUG") == "1"
12574         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12575                 mysettings,
12576                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12577                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12578         if retval == os.EX_OK:
12579                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12580                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12581         print
12582
12583 def action_info(settings, trees, myopts, myfiles):
12584         print getportageversion(settings["PORTDIR"], settings["ROOT"],
12585                 settings.profile_path, settings["CHOST"],
12586                 trees[settings["ROOT"]]["vartree"].dbapi)
12587         header_width = 65
12588         header_title = "System Settings"
12589         if myfiles:
12590                 print header_width * "="
12591                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12592         print header_width * "="
12593         print "System uname: "+platform.platform(aliased=1)
12594
12595         lastSync = portage.grabfile(os.path.join(
12596                 settings["PORTDIR"], "metadata", "timestamp.chk"))
12597         print "Timestamp of tree:",
12598         if lastSync:
12599                 print lastSync[0]
12600         else:
12601                 print "Unknown"
12602
12603         output=commands.getstatusoutput("distcc --version")
12604         if not output[0]:
12605                 print str(output[1].split("\n",1)[0]),
12606                 if "distcc" in settings.features:
12607                         print "[enabled]"
12608                 else:
12609                         print "[disabled]"
12610
12611         output=commands.getstatusoutput("ccache -V")
12612         if not output[0]:
12613                 print str(output[1].split("\n",1)[0]),
12614                 if "ccache" in settings.features:
12615                         print "[enabled]"
12616                 else:
12617                         print "[disabled]"
12618
12619         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12620                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
12621         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12622         myvars  = portage.util.unique_array(myvars)
12623         myvars.sort()
12624
12625         for x in myvars:
12626                 if portage.isvalidatom(x):
12627                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12628                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12629                         pkg_matches.sort(portage.pkgcmp)
12630                         pkgs = []
12631                         for pn, ver, rev in pkg_matches:
12632                                 if rev != "r0":
12633                                         pkgs.append(ver + "-" + rev)
12634                                 else:
12635                                         pkgs.append(ver)
12636                         if pkgs:
12637                                 pkgs = ", ".join(pkgs)
12638                                 print "%-20s %s" % (x+":", pkgs)
12639                 else:
12640                         print "%-20s %s" % (x+":", "[NOT VALID]")
12641
12642         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12643
12644         if "--verbose" in myopts:
12645                 myvars=settings.keys()
12646         else:
12647                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12648                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12649                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12650                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12651
12652                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12653
12654         myvars = portage.util.unique_array(myvars)
12655         unset_vars = []
12656         myvars.sort()
12657         for x in myvars:
12658                 if x in settings:
12659                         if x != "USE":
12660                                 print '%s="%s"' % (x, settings[x])
12661                         else:
12662                                 use = set(settings["USE"].split())
12663                                 use_expand = settings["USE_EXPAND"].split()
12664                                 use_expand.sort()
12665                                 for varname in use_expand:
12666                                         flag_prefix = varname.lower() + "_"
12667                                         for f in list(use):
12668                                                 if f.startswith(flag_prefix):
12669                                                         use.remove(f)
12670                                 use = list(use)
12671                                 use.sort()
12672                                 print 'USE="%s"' % " ".join(use),
12673                                 for varname in use_expand:
12674                                         myval = settings.get(varname)
12675                                         if myval:
12676                                                 print '%s="%s"' % (varname, myval),
12677                                 print
12678                 else:
12679                         unset_vars.append(x)
12680         if unset_vars:
12681                 print "Unset:  "+", ".join(unset_vars)
12682         print
12683
12684         if "--debug" in myopts:
12685                 for x in dir(portage):
12686                         module = getattr(portage, x)
12687                         if "cvs_id_string" in dir(module):
12688                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
12689
12690         # See if we can find any packages installed matching the strings
12691         # passed on the command line
12692         mypkgs = []
12693         vardb = trees[settings["ROOT"]]["vartree"].dbapi
12694         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12695         for x in myfiles:
12696                 mypkgs.extend(vardb.match(x))
12697
12698         # If some packages were found...
12699         if mypkgs:
12700                 # Get our global settings (we only print stuff if it varies from
12701                 # the current config)
12702                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12703                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12704                 global_vals = {}
12705                 pkgsettings = portage.config(clone=settings)
12706
12707                 for myvar in mydesiredvars:
12708                         global_vals[myvar] = set(settings.get(myvar, "").split())
12709
12710                 # Loop through each package
12711                 # Only print settings if they differ from global settings
12712                 header_title = "Package Settings"
12713                 print header_width * "="
12714                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12715                 print header_width * "="
12716                 from portage.output import EOutput
12717                 out = EOutput()
12718                 for pkg in mypkgs:
12719                         # Get all package specific variables
12720                         auxvalues = vardb.aux_get(pkg, auxkeys)
12721                         valuesmap = {}
12722                         for i in xrange(len(auxkeys)):
12723                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12724                         diff_values = {}
12725                         for myvar in mydesiredvars:
12726                                 # If the package variable doesn't match the
12727                                 # current global variable, something has changed
12728                                 # so set diff_found so we know to print
12729                                 if valuesmap[myvar] != global_vals[myvar]:
12730                                         diff_values[myvar] = valuesmap[myvar]
12731                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12732                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12733                         pkgsettings.reset()
12734                         # If a matching ebuild is no longer available in the tree, maybe it
12735                         # would make sense to compare against the flags for the best
12736                         # available version with the same slot?
12737                         mydb = None
12738                         if portdb.cpv_exists(pkg):
12739                                 mydb = portdb
12740                         pkgsettings.setcpv(pkg, mydb=mydb)
12741                         if valuesmap["IUSE"].intersection(
12742                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12743                                 diff_values["USE"] = valuesmap["USE"]
12744                         # If a difference was found, print the info for
12745                         # this package.
12746                         if diff_values:
12747                                 # Print package info
12748                                 print "%s was built with the following:" % pkg
12749                                 for myvar in mydesiredvars + ["USE"]:
12750                                         if myvar in diff_values:
12751                                                 mylist = list(diff_values[myvar])
12752                                                 mylist.sort()
12753                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12754                                 print
12755                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
12756                         ebuildpath = vardb.findname(pkg)
12757                         if not ebuildpath or not os.path.exists(ebuildpath):
12758                                 out.ewarn("No ebuild found for '%s'" % pkg)
12759                                 continue
12760                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12761                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12762                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12763                                 tree="vartree")
12764
12765 def action_search(root_config, myopts, myfiles, spinner):
12766         if not myfiles:
12767                 print "emerge: no search terms provided."
12768         else:
12769                 searchinstance = search(root_config,
12770                         spinner, "--searchdesc" in myopts,
12771                         "--quiet" not in myopts, "--usepkg" in myopts,
12772                         "--usepkgonly" in myopts)
12773                 for mysearch in myfiles:
12774                         try:
12775                                 searchinstance.execute(mysearch)
12776                         except re.error, comment:
12777                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12778                                 sys.exit(1)
12779                         searchinstance.output()
12780
12781 def action_depclean(settings, trees, ldpath_mtimes,
12782         myopts, action, myfiles, spinner):
12783         # Kill packages that aren't explicitly merged or are required as a
12784         # dependency of another package. World file is explicit.
12785
12786         # Global depclean or prune operations are not very safe when there are
12787         # missing dependencies since it's unknown how badly incomplete
12788         # the dependency graph is, and we might accidentally remove packages
12789         # that should have been pulled into the graph. On the other hand, it's
12790         # relatively safe to ignore missing deps when only asked to remove
12791         # specific packages.
12792         allow_missing_deps = len(myfiles) > 0
12793
12794         msg = []
12795         msg.append("Always study the list of packages to be cleaned for any obvious\n")
12796         msg.append("mistakes. Packages that are part of the world set will always\n")
12797         msg.append("be kept.  They can be manually added to this set with\n")
12798         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
12799         msg.append("package.provided (see portage(5)) will be removed by\n")
12800         msg.append("depclean, even if they are part of the world set.\n")
12801         msg.append("\n")
12802         msg.append("As a safety measure, depclean will not remove any packages\n")
12803         msg.append("unless *all* required dependencies have been resolved.  As a\n")
12804         msg.append("consequence, it is often necessary to run %s\n" % \
12805                 good("`emerge --update"))
12806         msg.append(good("--newuse --deep @system @world`") + \
12807                 " prior to depclean.\n")
12808
12809         if action == "depclean" and "--quiet" not in myopts and not myfiles:
12810                 portage.writemsg_stdout("\n")
12811                 for x in msg:
12812                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
12813
12814         xterm_titles = "notitles" not in settings.features
12815         myroot = settings["ROOT"]
12816         root_config = trees[myroot]["root_config"]
12817         getSetAtoms = root_config.setconfig.getSetAtoms
12818         vardb = trees[myroot]["vartree"].dbapi
12819
12820         required_set_names = ("system", "world")
12821         required_sets = {}
12822         set_args = []
12823
12824         for s in required_set_names:
12825                 required_sets[s] = InternalPackageSet(
12826                         initial_atoms=getSetAtoms(s))
12827
12828         
12829         # When removing packages, use a temporary version of world
12830         # which excludes packages that are intended to be eligible for
12831         # removal.
12832         world_temp_set = required_sets["world"]
12833         system_set = required_sets["system"]
12834
12835         if not system_set or not world_temp_set:
12836
12837                 if not system_set:
12838                         writemsg_level("!!! You have no system list.\n",
12839                                 level=logging.ERROR, noiselevel=-1)
12840
12841                 if not world_temp_set:
12842                         writemsg_level("!!! You have no world file.\n",
12843                                         level=logging.WARNING, noiselevel=-1)
12844
12845                 writemsg_level("!!! Proceeding is likely to " + \
12846                         "break your installation.\n",
12847                         level=logging.WARNING, noiselevel=-1)
12848                 if "--pretend" not in myopts:
12849                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12850
12851         if action == "depclean":
12852                 emergelog(xterm_titles, " >>> depclean")
12853
12854         import textwrap
12855         args_set = InternalPackageSet()
12856         if myfiles:
12857                 for x in myfiles:
12858                         if not is_valid_package_atom(x):
12859                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12860                                         level=logging.ERROR, noiselevel=-1)
12861                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12862                                 return
12863                         try:
12864                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12865                         except portage.exception.AmbiguousPackageName, e:
12866                                 msg = "The short ebuild name \"" + x + \
12867                                         "\" is ambiguous.  Please specify " + \
12868                                         "one of the following " + \
12869                                         "fully-qualified ebuild names instead:"
12870                                 for line in textwrap.wrap(msg, 70):
12871                                         writemsg_level("!!! %s\n" % (line,),
12872                                                 level=logging.ERROR, noiselevel=-1)
12873                                 for i in e[0]:
12874                                         writemsg_level("    %s\n" % colorize("INFORM", i),
12875                                                 level=logging.ERROR, noiselevel=-1)
12876                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12877                                 return
12878                         args_set.add(atom)
12879                 matched_packages = False
12880                 for x in args_set:
12881                         if vardb.match(x):
12882                                 matched_packages = True
12883                                 break
12884                 if not matched_packages:
12885                         writemsg_level(">>> No packages selected for removal by %s\n" % \
12886                                 action)
12887                         return
12888
12889         writemsg_level("\nCalculating dependencies  ")
12890         resolver_params = create_depgraph_params(myopts, "remove")
12891         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12892         vardb = resolver.trees[myroot]["vartree"].dbapi
12893
12894         if action == "depclean":
12895
12896                 if args_set:
12897                         # Pull in everything that's installed but not matched
12898                         # by an argument atom since we don't want to clean any
12899                         # package if something depends on it.
12900
12901                         world_temp_set.clear()
12902                         for pkg in vardb:
12903                                 spinner.update()
12904
12905                                 try:
12906                                         if args_set.findAtomForPackage(pkg) is None:
12907                                                 world_temp_set.add("=" + pkg.cpv)
12908                                                 continue
12909                                 except portage.exception.InvalidDependString, e:
12910                                         show_invalid_depstring_notice(pkg,
12911                                                 pkg.metadata["PROVIDE"], str(e))
12912                                         del e
12913                                         world_temp_set.add("=" + pkg.cpv)
12914                                         continue
12915
12916         elif action == "prune":
12917
12918                 # Pull in everything that's installed since we don't
12919                 # to prune a package if something depends on it.
12920                 world_temp_set.clear()
12921                 world_temp_set.update(vardb.cp_all())
12922
12923                 if not args_set:
12924
12925                         # Try to prune everything that's slotted.
12926                         for cp in vardb.cp_all():
12927                                 if len(vardb.cp_list(cp)) > 1:
12928                                         args_set.add(cp)
12929
12930                 # Remove atoms from world that match installed packages
12931                 # that are also matched by argument atoms, but do not remove
12932                 # them if they match the highest installed version.
12933                 for pkg in vardb:
12934                         spinner.update()
12935                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12936                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
12937                                 raise AssertionError("package expected in matches: " + \
12938                                         "cp = %s, cpv = %s matches = %s" % \
12939                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12940
12941                         highest_version = pkgs_for_cp[-1]
12942                         if pkg == highest_version:
12943                                 # pkg is the highest version
12944                                 world_temp_set.add("=" + pkg.cpv)
12945                                 continue
12946
12947                         if len(pkgs_for_cp) <= 1:
12948                                 raise AssertionError("more packages expected: " + \
12949                                         "cp = %s, cpv = %s matches = %s" % \
12950                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12951
12952                         try:
12953                                 if args_set.findAtomForPackage(pkg) is None:
12954                                         world_temp_set.add("=" + pkg.cpv)
12955                                         continue
12956                         except portage.exception.InvalidDependString, e:
12957                                 show_invalid_depstring_notice(pkg,
12958                                         pkg.metadata["PROVIDE"], str(e))
12959                                 del e
12960                                 world_temp_set.add("=" + pkg.cpv)
12961                                 continue
12962
12963         set_args = {}
12964         for s, package_set in required_sets.iteritems():
12965                 set_atom = SETPREFIX + s
12966                 set_arg = SetArg(arg=set_atom, set=package_set,
12967                         root_config=resolver.roots[myroot])
12968                 set_args[s] = set_arg
12969                 for atom in set_arg.set:
12970                         resolver._dep_stack.append(
12971                                 Dependency(atom=atom, root=myroot, parent=set_arg))
12972                         resolver.digraph.add(set_arg, None)
12973
12974         success = resolver._complete_graph()
12975         writemsg_level("\b\b... done!\n")
12976
12977         resolver.display_problems()
12978
12979         if not success:
12980                 return 1
12981
12982         def unresolved_deps():
12983
12984                 unresolvable = set()
12985                 for dep in resolver._initially_unsatisfied_deps:
12986                         if isinstance(dep.parent, Package) and \
12987                                 (dep.priority > UnmergeDepPriority.SOFT):
12988                                 unresolvable.add((dep.atom, dep.parent.cpv))
12989
12990                 if not unresolvable:
12991                         return False
12992
12993                 if unresolvable and not allow_missing_deps:
12994                         prefix = bad(" * ")
12995                         msg = []
12996                         msg.append("Dependencies could not be completely resolved due to")
12997                         msg.append("the following required packages not being installed:")
12998                         msg.append("")
12999                         for atom, parent in unresolvable:
13000                                 msg.append("  %s pulled in by:" % (atom,))
13001                                 msg.append("    %s" % (parent,))
13002                                 msg.append("")
13003                         msg.append("Have you forgotten to run " + \
13004                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13005                         msg.append(("to %s? It may be necessary to manually " + \
13006                                 "uninstall packages that no longer") % action)
13007                         msg.append("exist in the portage tree since " + \
13008                                 "it may not be possible to satisfy their")
13009                         msg.append("dependencies.  Also, be aware of " + \
13010                                 "the --with-bdeps option that is documented")
13011                         msg.append("in " + good("`man emerge`") + ".")
13012                         if action == "prune":
13013                                 msg.append("")
13014                                 msg.append("If you would like to ignore " + \
13015                                         "dependencies then use %s." % good("--nodeps"))
13016                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13017                                 level=logging.ERROR, noiselevel=-1)
13018                         return True
13019                 return False
13020
13021         if unresolved_deps():
13022                 return 1
13023
13024         graph = resolver.digraph.copy()
13025         required_pkgs_total = 0
13026         for node in graph:
13027                 if isinstance(node, Package):
13028                         required_pkgs_total += 1
13029
13030         def show_parents(child_node):
13031                 parent_nodes = graph.parent_nodes(child_node)
13032                 if not parent_nodes:
13033                         # With --prune, the highest version can be pulled in without any
13034                         # real parent since all installed packages are pulled in.  In that
13035                         # case there's nothing to show here.
13036                         return
13037                 parent_strs = []
13038                 for node in parent_nodes:
13039                         parent_strs.append(str(getattr(node, "cpv", node)))
13040                 parent_strs.sort()
13041                 msg = []
13042                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13043                 for parent_str in parent_strs:
13044                         msg.append("    %s\n" % (parent_str,))
13045                 msg.append("\n")
13046                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13047
13048         def create_cleanlist():
13049                 pkgs_to_remove = []
13050
13051                 if action == "depclean":
13052                         if args_set:
13053
13054                                 for pkg in vardb:
13055                                         arg_atom = None
13056                                         try:
13057                                                 arg_atom = args_set.findAtomForPackage(pkg)
13058                                         except portage.exception.InvalidDependString:
13059                                                 # this error has already been displayed by now
13060                                                 continue
13061
13062                                         if arg_atom:
13063                                                 if pkg not in graph:
13064                                                         pkgs_to_remove.append(pkg)
13065                                                 elif "--verbose" in myopts:
13066                                                         show_parents(pkg)
13067
13068                         else:
13069                                 for pkg in vardb:
13070                                         if pkg not in graph:
13071                                                 pkgs_to_remove.append(pkg)
13072                                         elif "--verbose" in myopts:
13073                                                 show_parents(pkg)
13074
13075                 elif action == "prune":
13076                         # Prune really uses all installed instead of world. It's not
13077                         # a real reverse dependency so don't display it as such.
13078                         graph.remove(set_args["world"])
13079
13080                         for atom in args_set:
13081                                 for pkg in vardb.match_pkgs(atom):
13082                                         if pkg not in graph:
13083                                                 pkgs_to_remove.append(pkg)
13084                                         elif "--verbose" in myopts:
13085                                                 show_parents(pkg)
13086
13087                 if not pkgs_to_remove:
13088                         writemsg_level(
13089                                 ">>> No packages selected for removal by %s\n" % action)
13090                         if "--verbose" not in myopts:
13091                                 writemsg_level(
13092                                         ">>> To see reverse dependencies, use %s\n" % \
13093                                                 good("--verbose"))
13094                         if action == "prune":
13095                                 writemsg_level(
13096                                         ">>> To ignore dependencies, use %s\n" % \
13097                                                 good("--nodeps"))
13098
13099                 return pkgs_to_remove
13100
13101         cleanlist = create_cleanlist()
13102
13103         if len(cleanlist):
13104                 clean_set = set(cleanlist)
13105
13106                 # Check if any of these package are the sole providers of libraries
13107                 # with consumers that have not been selected for removal. If so, these
13108                 # packages and any dependencies need to be added to the graph.
13109                 real_vardb = trees[myroot]["vartree"].dbapi
13110                 linkmap = real_vardb.linkmap
13111                 liblist = linkmap.listLibraryObjects()
13112                 consumer_cache = {}
13113                 provider_cache = {}
13114                 soname_cache = {}
13115                 consumer_map = {}
13116
13117                 writemsg_level(">>> Checking for lib consumers...\n")
13118
13119                 for pkg in cleanlist:
13120                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13121                         provided_libs = set()
13122
13123                         for lib in liblist:
13124                                 if pkg_dblink.isowner(lib, myroot):
13125                                         provided_libs.add(lib)
13126
13127                         if not provided_libs:
13128                                 continue
13129
13130                         consumers = {}
13131                         for lib in provided_libs:
13132                                 lib_consumers = consumer_cache.get(lib)
13133                                 if lib_consumers is None:
13134                                         lib_consumers = linkmap.findConsumers(lib)
13135                                         consumer_cache[lib] = lib_consumers
13136                                 if lib_consumers:
13137                                         consumers[lib] = lib_consumers
13138
13139                         if not consumers:
13140                                 continue
13141
13142                         for lib, lib_consumers in consumers.items():
13143                                 for consumer_file in list(lib_consumers):
13144                                         if pkg_dblink.isowner(consumer_file, myroot):
13145                                                 lib_consumers.remove(consumer_file)
13146                                 if not lib_consumers:
13147                                         del consumers[lib]
13148
13149                         if not consumers:
13150                                 continue
13151
13152                         for lib, lib_consumers in consumers.iteritems():
13153
13154                                 soname = soname_cache.get(lib)
13155                                 if soname is None:
13156                                         soname = linkmap.getSoname(lib)
13157                                         soname_cache[lib] = soname
13158
13159                                 consumer_providers = []
13160                                 for lib_consumer in lib_consumers:
13161                                         providers = provider_cache.get(lib)
13162                                         if providers is None:
13163                                                 providers = linkmap.findProviders(lib_consumer)
13164                                                 provider_cache[lib_consumer] = providers
13165                                         if soname not in providers:
13166                                                 # Why does this happen?
13167                                                 continue
13168                                         consumer_providers.append(
13169                                                 (lib_consumer, providers[soname]))
13170
13171                                 consumers[lib] = consumer_providers
13172
13173                         consumer_map[pkg] = consumers
13174
13175                 if consumer_map:
13176
13177                         search_files = set()
13178                         for consumers in consumer_map.itervalues():
13179                                 for lib, consumer_providers in consumers.iteritems():
13180                                         for lib_consumer, providers in consumer_providers:
13181                                                 search_files.add(lib_consumer)
13182                                                 search_files.update(providers)
13183
13184                         writemsg_level(">>> Assigning files to packages...\n")
13185                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13186
13187                         for pkg, consumers in consumer_map.items():
13188                                 for lib, consumer_providers in consumers.items():
13189                                         lib_consumers = set()
13190
13191                                         for lib_consumer, providers in consumer_providers:
13192                                                 owner_set = file_owners.get(lib_consumer)
13193                                                 provider_dblinks = set()
13194                                                 provider_pkgs = set()
13195
13196                                                 if len(providers) > 1:
13197                                                         for provider in providers:
13198                                                                 provider_set = file_owners.get(provider)
13199                                                                 if provider_set is not None:
13200                                                                         provider_dblinks.update(provider_set)
13201
13202                                                 if len(provider_dblinks) > 1:
13203                                                         for provider_dblink in provider_dblinks:
13204                                                                 pkg_key = ("installed", myroot,
13205                                                                         provider_dblink.mycpv, "nomerge")
13206                                                                 if pkg_key not in clean_set:
13207                                                                         provider_pkgs.add(vardb.get(pkg_key))
13208
13209                                                 if provider_pkgs:
13210                                                         continue
13211
13212                                                 if owner_set is not None:
13213                                                         lib_consumers.update(owner_set)
13214
13215                                         for consumer_dblink in list(lib_consumers):
13216                                                 if ("installed", myroot, consumer_dblink.mycpv,
13217                                                         "nomerge") in clean_set:
13218                                                         lib_consumers.remove(consumer_dblink)
13219                                                         continue
13220
13221                                         if lib_consumers:
13222                                                 consumers[lib] = lib_consumers
13223                                         else:
13224                                                 del consumers[lib]
13225                                 if not consumers:
13226                                         del consumer_map[pkg]
13227
13228                 if consumer_map:
13229                         # TODO: Implement a package set for rebuilding consumer packages.
13230
13231                         msg = "In order to avoid breakage of link level " + \
13232                                 "dependencies, one or more packages will not be removed. " + \
13233                                 "This can be solved by rebuilding " + \
13234                                 "the packages that pulled them in."
13235
13236                         prefix = bad(" * ")
13237                         from textwrap import wrap
13238                         writemsg_level("".join(prefix + "%s\n" % line for \
13239                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13240
13241                         msg = []
13242                         for pkg, consumers in consumer_map.iteritems():
13243                                 unique_consumers = set(chain(*consumers.values()))
13244                                 unique_consumers = sorted(consumer.mycpv \
13245                                         for consumer in unique_consumers)
13246                                 msg.append("")
13247                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13248                                 for consumer in unique_consumers:
13249                                         msg.append("    %s" % (consumer,))
13250                         msg.append("")
13251                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13252                                 level=logging.WARNING, noiselevel=-1)
13253
13254                         # Add lib providers to the graph as children of lib consumers,
13255                         # and also add any dependencies pulled in by the provider.
13256                         writemsg_level(">>> Adding lib providers to graph...\n")
13257
13258                         for pkg, consumers in consumer_map.iteritems():
13259                                 for consumer_dblink in set(chain(*consumers.values())):
13260                                         consumer_pkg = vardb.get(("installed", myroot,
13261                                                 consumer_dblink.mycpv, "nomerge"))
13262                                         if not resolver._add_pkg(pkg,
13263                                                 Dependency(parent=consumer_pkg,
13264                                                 priority=UnmergeDepPriority(runtime=True),
13265                                                 root=pkg.root)):
13266                                                 resolver.display_problems()
13267                                                 return 1
13268
13269                         writemsg_level("\nCalculating dependencies  ")
13270                         success = resolver._complete_graph()
13271                         writemsg_level("\b\b... done!\n")
13272                         resolver.display_problems()
13273                         if not success:
13274                                 return 1
13275                         if unresolved_deps():
13276                                 return 1
13277
13278                         graph = resolver.digraph.copy()
13279                         required_pkgs_total = 0
13280                         for node in graph:
13281                                 if isinstance(node, Package):
13282                                         required_pkgs_total += 1
13283                         cleanlist = create_cleanlist()
13284                         if not cleanlist:
13285                                 return 0
13286                         clean_set = set(cleanlist)
13287
13288                 # Use a topological sort to create an unmerge order such that
13289                 # each package is unmerged before it's dependencies. This is
13290                 # necessary to avoid breaking things that may need to run
13291                 # during pkg_prerm or pkg_postrm phases.
13292
13293                 # Create a new graph to account for dependencies between the
13294                 # packages being unmerged.
13295                 graph = digraph()
13296                 del cleanlist[:]
13297
13298                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13299                 runtime = UnmergeDepPriority(runtime=True)
13300                 runtime_post = UnmergeDepPriority(runtime_post=True)
13301                 buildtime = UnmergeDepPriority(buildtime=True)
13302                 priority_map = {
13303                         "RDEPEND": runtime,
13304                         "PDEPEND": runtime_post,
13305                         "DEPEND": buildtime,
13306                 }
13307
13308                 for node in clean_set:
13309                         graph.add(node, None)
13310                         mydeps = []
13311                         node_use = node.metadata["USE"].split()
13312                         for dep_type in dep_keys:
13313                                 depstr = node.metadata[dep_type]
13314                                 if not depstr:
13315                                         continue
13316                                 try:
13317                                         portage.dep._dep_check_strict = False
13318                                         success, atoms = portage.dep_check(depstr, None, settings,
13319                                                 myuse=node_use, trees=resolver._graph_trees,
13320                                                 myroot=myroot)
13321                                 finally:
13322                                         portage.dep._dep_check_strict = True
13323                                 if not success:
13324                                         # Ignore invalid deps of packages that will
13325                                         # be uninstalled anyway.
13326                                         continue
13327
13328                                 priority = priority_map[dep_type]
13329                                 for atom in atoms:
13330                                         if not isinstance(atom, portage.dep.Atom):
13331                                                 # Ignore invalid atoms returned from dep_check().
13332                                                 continue
13333                                         if atom.blocker:
13334                                                 continue
13335                                         matches = vardb.match_pkgs(atom)
13336                                         if not matches:
13337                                                 continue
13338                                         for child_node in matches:
13339                                                 if child_node in clean_set:
13340                                                         graph.add(child_node, node, priority=priority)
13341
13342                 ordered = True
13343                 if len(graph.order) == len(graph.root_nodes()):
13344                         # If there are no dependencies between packages
13345                         # let unmerge() group them by cat/pn.
13346                         ordered = False
13347                         cleanlist = [pkg.cpv for pkg in graph.order]
13348                 else:
13349                         # Order nodes from lowest to highest overall reference count for
13350                         # optimal root node selection.
13351                         node_refcounts = {}
13352                         for node in graph.order:
13353                                 node_refcounts[node] = len(graph.parent_nodes(node))
13354                         def cmp_reference_count(node1, node2):
13355                                 return node_refcounts[node1] - node_refcounts[node2]
13356                         graph.order.sort(cmp_reference_count)
13357         
13358                         ignore_priority_range = [None]
13359                         ignore_priority_range.extend(
13360                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13361                         while not graph.empty():
13362                                 for ignore_priority in ignore_priority_range:
13363                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
13364                                         if nodes:
13365                                                 break
13366                                 if not nodes:
13367                                         raise AssertionError("no root nodes")
13368                                 if ignore_priority is not None:
13369                                         # Some deps have been dropped due to circular dependencies,
13370                                         # so only pop one node in order do minimize the number that
13371                                         # are dropped.
13372                                         del nodes[1:]
13373                                 for node in nodes:
13374                                         graph.remove(node)
13375                                         cleanlist.append(node.cpv)
13376
13377                 unmerge(root_config, myopts, "unmerge", cleanlist,
13378                         ldpath_mtimes, ordered=ordered)
13379
13380         if action == "prune":
13381                 return
13382
13383         if not cleanlist and "--quiet" in myopts:
13384                 return
13385
13386         print "Packages installed:   "+str(len(vardb.cpv_all()))
13387         print "Packages in world:    " + \
13388                 str(len(root_config.sets["world"].getAtoms()))
13389         print "Packages in system:   " + \
13390                 str(len(root_config.sets["system"].getAtoms()))
13391         print "Required packages:    "+str(required_pkgs_total)
13392         if "--pretend" in myopts:
13393                 print "Number to remove:     "+str(len(cleanlist))
13394         else:
13395                 print "Number removed:       "+str(len(cleanlist))
13396
13397 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13398         skip_masked=False, skip_unsatisfied=False):
13399         """
13400         Construct a depgraph for the given resume list. This will raise
13401         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13402         @rtype: tuple
13403         @returns: (success, depgraph, dropped_tasks)
13404         """
13405         mergelist = mtimedb["resume"]["mergelist"]
13406         dropped_tasks = set()
13407         while True:
13408                 mydepgraph = depgraph(settings, trees,
13409                         myopts, myparams, spinner)
13410                 try:
13411                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13412                                 skip_masked=skip_masked)
13413                 except depgraph.UnsatisfiedResumeDep, e:
13414                         if not skip_unsatisfied:
13415                                 raise
13416
13417                         graph = mydepgraph.digraph
13418                         unsatisfied_parents = dict((dep.parent, dep.parent) \
13419                                 for dep in e.value)
13420                         traversed_nodes = set()
13421                         unsatisfied_stack = list(unsatisfied_parents)
13422                         while unsatisfied_stack:
13423                                 pkg = unsatisfied_stack.pop()
13424                                 if pkg in traversed_nodes:
13425                                         continue
13426                                 traversed_nodes.add(pkg)
13427
13428                                 # If this package was pulled in by a parent
13429                                 # package scheduled for merge, removing this
13430                                 # package may cause the the parent package's
13431                                 # dependency to become unsatisfied.
13432                                 for parent_node in graph.parent_nodes(pkg):
13433                                         if not isinstance(parent_node, Package) \
13434                                                 or parent_node.operation not in ("merge", "nomerge"):
13435                                                 continue
13436                                         unsatisfied = \
13437                                                 graph.child_nodes(parent_node,
13438                                                 ignore_priority=DepPriority.SOFT)
13439                                         if pkg in unsatisfied:
13440                                                 unsatisfied_parents[parent_node] = parent_node
13441                                                 unsatisfied_stack.append(parent_node)
13442
13443                         pruned_mergelist = [x for x in mergelist \
13444                                 if isinstance(x, list) and \
13445                                 tuple(x) not in unsatisfied_parents]
13446
13447                         # If the mergelist doesn't shrink then this loop is infinite.
13448                         if len(pruned_mergelist) == len(mergelist):
13449                                 # This happens if a package can't be dropped because
13450                                 # it's already installed, but it has unsatisfied PDEPEND.
13451                                 raise
13452                         mergelist[:] = pruned_mergelist
13453
13454                         # Exclude installed packages that have been removed from the graph due
13455                         # to failure to build/install runtime dependencies after the dependent
13456                         # package has already been installed.
13457                         dropped_tasks.update(pkg for pkg in \
13458                                 unsatisfied_parents if pkg.operation != "nomerge")
13459                         mydepgraph.break_refs(unsatisfied_parents)
13460
13461                         del e, graph, traversed_nodes, \
13462                                 unsatisfied_parents, unsatisfied_stack
13463                         continue
13464                 else:
13465                         break
13466         return (success, mydepgraph, dropped_tasks)
13467
13468 def action_build(settings, trees, mtimedb,
13469         myopts, myaction, myfiles, spinner):
13470
13471         # validate the state of the resume data
13472         # so that we can make assumptions later.
13473         for k in ("resume", "resume_backup"):
13474                 if k not in mtimedb:
13475                         continue
13476                 resume_data = mtimedb[k]
13477                 if not isinstance(resume_data, dict):
13478                         del mtimedb[k]
13479                         continue
13480                 mergelist = resume_data.get("mergelist")
13481                 if not isinstance(mergelist, list):
13482                         del mtimedb[k]
13483                         continue
13484                 for x in mergelist:
13485                         if not (isinstance(x, list) and len(x) == 4):
13486                                 continue
13487                         pkg_type, pkg_root, pkg_key, pkg_action = x
13488                         if pkg_root not in trees:
13489                                 # Current $ROOT setting differs,
13490                                 # so the list must be stale.
13491                                 mergelist = None
13492                                 break
13493                 if not mergelist:
13494                         del mtimedb[k]
13495                         continue
13496                 resume_opts = resume_data.get("myopts")
13497                 if not isinstance(resume_opts, (dict, list)):
13498                         del mtimedb[k]
13499                         continue
13500                 favorites = resume_data.get("favorites")
13501                 if not isinstance(favorites, list):
13502                         del mtimedb[k]
13503                         continue
13504
13505         resume = False
13506         if "--resume" in myopts and \
13507                 ("resume" in mtimedb or
13508                 "resume_backup" in mtimedb):
13509                 resume = True
13510                 if "resume" not in mtimedb:
13511                         mtimedb["resume"] = mtimedb["resume_backup"]
13512                         del mtimedb["resume_backup"]
13513                         mtimedb.commit()
13514                 # "myopts" is a list for backward compatibility.
13515                 resume_opts = mtimedb["resume"].get("myopts", [])
13516                 if isinstance(resume_opts, list):
13517                         resume_opts = dict((k,True) for k in resume_opts)
13518                 for opt in ("--skipfirst", "--ask", "--tree"):
13519                         resume_opts.pop(opt, None)
13520                 myopts.update(resume_opts)
13521
13522                 if "--debug" in myopts:
13523                         writemsg_level("myopts %s\n" % (myopts,))
13524
13525                 # Adjust config according to options of the command being resumed.
13526                 for myroot in trees:
13527                         mysettings =  trees[myroot]["vartree"].settings
13528                         mysettings.unlock()
13529                         adjust_config(myopts, mysettings)
13530                         mysettings.lock()
13531                         del myroot, mysettings
13532
13533         ldpath_mtimes = mtimedb["ldpath"]
13534         favorites=[]
13535         merge_count = 0
13536         buildpkgonly = "--buildpkgonly" in myopts
13537         pretend = "--pretend" in myopts
13538         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13539         ask = "--ask" in myopts
13540         nodeps = "--nodeps" in myopts
13541         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13542         tree = "--tree" in myopts
13543         if nodeps and tree:
13544                 tree = False
13545                 del myopts["--tree"]
13546                 portage.writemsg(colorize("WARN", " * ") + \
13547                         "--tree is broken with --nodeps. Disabling...\n")
13548         debug = "--debug" in myopts
13549         verbose = "--verbose" in myopts
13550         quiet = "--quiet" in myopts
13551         if pretend or fetchonly:
13552                 # make the mtimedb readonly
13553                 mtimedb.filename = None
13554         if "--digest" in myopts:
13555                 msg = "The --digest option can prevent corruption from being" + \
13556                         " noticed. The `repoman manifest` command is the preferred" + \
13557                         " way to generate manifests and it is capable of doing an" + \
13558                         " entire repository or category at once."
13559                 prefix = bad(" * ")
13560                 writemsg(prefix + "\n")
13561                 from textwrap import wrap
13562                 for line in wrap(msg, 72):
13563                         writemsg("%s%s\n" % (prefix, line))
13564                 writemsg(prefix + "\n")
13565
13566         if "--quiet" not in myopts and \
13567                 ("--pretend" in myopts or "--ask" in myopts or \
13568                 "--tree" in myopts or "--verbose" in myopts):
13569                 action = ""
13570                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13571                         action = "fetched"
13572                 elif "--buildpkgonly" in myopts:
13573                         action = "built"
13574                 else:
13575                         action = "merged"
13576                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13577                         print
13578                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
13579                         print
13580                 else:
13581                         print
13582                         print darkgreen("These are the packages that would be %s, in order:") % action
13583                         print
13584
13585         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13586         if not show_spinner:
13587                 spinner.update = spinner.update_quiet
13588
13589         if resume:
13590                 favorites = mtimedb["resume"].get("favorites")
13591                 if not isinstance(favorites, list):
13592                         favorites = []
13593
13594                 if show_spinner:
13595                         print "Calculating dependencies  ",
13596                 myparams = create_depgraph_params(myopts, myaction)
13597
13598                 resume_data = mtimedb["resume"]
13599                 mergelist = resume_data["mergelist"]
13600                 if mergelist and "--skipfirst" in myopts:
13601                         for i, task in enumerate(mergelist):
13602                                 if isinstance(task, list) and \
13603                                         task and task[-1] == "merge":
13604                                         del mergelist[i]
13605                                         break
13606
13607                 skip_masked      = "--skipfirst" in myopts
13608                 skip_unsatisfied = "--skipfirst" in myopts
13609                 success = False
13610                 mydepgraph = None
13611                 try:
13612                         success, mydepgraph, dropped_tasks = resume_depgraph(
13613                                 settings, trees, mtimedb, myopts, myparams, spinner,
13614                                 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13615                 except (portage.exception.PackageNotFound,
13616                         depgraph.UnsatisfiedResumeDep), e:
13617                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13618                                 mydepgraph = e.depgraph
13619                         if show_spinner:
13620                                 print
13621                         from textwrap import wrap
13622                         from portage.output import EOutput
13623                         out = EOutput()
13624
13625                         resume_data = mtimedb["resume"]
13626                         mergelist = resume_data.get("mergelist")
13627                         if not isinstance(mergelist, list):
13628                                 mergelist = []
13629                         if mergelist and debug or (verbose and not quiet):
13630                                 out.eerror("Invalid resume list:")
13631                                 out.eerror("")
13632                                 indent = "  "
13633                                 for task in mergelist:
13634                                         if isinstance(task, list):
13635                                                 out.eerror(indent + str(tuple(task)))
13636                                 out.eerror("")
13637
13638                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13639                                 out.eerror("One or more packages are either masked or " + \
13640                                         "have missing dependencies:")
13641                                 out.eerror("")
13642                                 indent = "  "
13643                                 for dep in e.value:
13644                                         if dep.atom is None:
13645                                                 out.eerror(indent + "Masked package:")
13646                                                 out.eerror(2 * indent + str(dep.parent))
13647                                                 out.eerror("")
13648                                         else:
13649                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
13650                                                 out.eerror(2 * indent + str(dep.parent))
13651                                                 out.eerror("")
13652                                 msg = "The resume list contains packages " + \
13653                                         "that are either masked or have " + \
13654                                         "unsatisfied dependencies. " + \
13655                                         "Please restart/continue " + \
13656                                         "the operation manually, or use --skipfirst " + \
13657                                         "to skip the first package in the list and " + \
13658                                         "any other packages that may be " + \
13659                                         "masked or have missing dependencies."
13660                                 for line in wrap(msg, 72):
13661                                         out.eerror(line)
13662                         elif isinstance(e, portage.exception.PackageNotFound):
13663                                 out.eerror("An expected package is " + \
13664                                         "not available: %s" % str(e))
13665                                 out.eerror("")
13666                                 msg = "The resume list contains one or more " + \
13667                                         "packages that are no longer " + \
13668                                         "available. Please restart/continue " + \
13669                                         "the operation manually."
13670                                 for line in wrap(msg, 72):
13671                                         out.eerror(line)
13672                 else:
13673                         if show_spinner:
13674                                 print "\b\b... done!"
13675
13676                 if success:
13677                         if dropped_tasks:
13678                                 portage.writemsg("!!! One or more packages have been " + \
13679                                         "dropped due to\n" + \
13680                                         "!!! masking or unsatisfied dependencies:\n\n",
13681                                         noiselevel=-1)
13682                                 for task in dropped_tasks:
13683                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
13684                                 portage.writemsg("\n", noiselevel=-1)
13685                         del dropped_tasks
13686                 else:
13687                         if mydepgraph is not None:
13688                                 mydepgraph.display_problems()
13689                         if not (ask or pretend):
13690                                 # delete the current list and also the backup
13691                                 # since it's probably stale too.
13692                                 for k in ("resume", "resume_backup"):
13693                                         mtimedb.pop(k, None)
13694                                 mtimedb.commit()
13695
13696                         return 1
13697         else:
13698                 if ("--resume" in myopts):
13699                         print darkgreen("emerge: It seems we have nothing to resume...")
13700                         return os.EX_OK
13701
13702                 myparams = create_depgraph_params(myopts, myaction)
13703                 if "--quiet" not in myopts and "--nodeps" not in myopts:
13704                         print "Calculating dependencies  ",
13705                         sys.stdout.flush()
13706                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13707                 try:
13708                         retval, favorites = mydepgraph.select_files(myfiles)
13709                 except portage.exception.PackageNotFound, e:
13710                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13711                         return 1
13712                 except portage.exception.PackageSetNotFound, e:
13713                         root_config = trees[settings["ROOT"]]["root_config"]
13714                         display_missing_pkg_set(root_config, e.value)
13715                         return 1
13716                 if show_spinner:
13717                         print "\b\b... done!"
13718                 if not retval:
13719                         mydepgraph.display_problems()
13720                         return 1
13721
13722         if "--pretend" not in myopts and \
13723                 ("--ask" in myopts or "--tree" in myopts or \
13724                 "--verbose" in myopts) and \
13725                 not ("--quiet" in myopts and "--ask" not in myopts):
13726                 if "--resume" in myopts:
13727                         mymergelist = mydepgraph.altlist()
13728                         if len(mymergelist) == 0:
13729                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13730                                 return os.EX_OK
13731                         favorites = mtimedb["resume"]["favorites"]
13732                         retval = mydepgraph.display(
13733                                 mydepgraph.altlist(reversed=tree),
13734                                 favorites=favorites)
13735                         mydepgraph.display_problems()
13736                         if retval != os.EX_OK:
13737                                 return retval
13738                         prompt="Would you like to resume merging these packages?"
13739                 else:
13740                         retval = mydepgraph.display(
13741                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13742                                 favorites=favorites)
13743                         mydepgraph.display_problems()
13744                         if retval != os.EX_OK:
13745                                 return retval
13746                         mergecount=0
13747                         for x in mydepgraph.altlist():
13748                                 if isinstance(x, Package) and x.operation == "merge":
13749                                         mergecount += 1
13750
13751                         if mergecount==0:
13752                                 sets = trees[settings["ROOT"]]["root_config"].sets
13753                                 world_candidates = None
13754                                 if "--noreplace" in myopts and \
13755                                         not oneshot and favorites:
13756                                         # Sets that are not world candidates are filtered
13757                                         # out here since the favorites list needs to be
13758                                         # complete for depgraph.loadResumeCommand() to
13759                                         # operate correctly.
13760                                         world_candidates = [x for x in favorites \
13761                                                 if not (x.startswith(SETPREFIX) and \
13762                                                 not sets[x[1:]].world_candidate)]
13763                                 if "--noreplace" in myopts and \
13764                                         not oneshot and world_candidates:
13765                                         print
13766                                         for x in world_candidates:
13767                                                 print " %s %s" % (good("*"), x)
13768                                         prompt="Would you like to add these packages to your world favorites?"
13769                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13770                                         prompt="Nothing to merge; would you like to auto-clean packages?"
13771                                 else:
13772                                         print
13773                                         print "Nothing to merge; quitting."
13774                                         print
13775                                         return os.EX_OK
13776                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13777                                 prompt="Would you like to fetch the source files for these packages?"
13778                         else:
13779                                 prompt="Would you like to merge these packages?"
13780                 print
13781                 if "--ask" in myopts and userquery(prompt) == "No":
13782                         print
13783                         print "Quitting."
13784                         print
13785                         return os.EX_OK
13786                 # Don't ask again (e.g. when auto-cleaning packages after merge)
13787                 myopts.pop("--ask", None)
13788
13789         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13790                 if ("--resume" in myopts):
13791                         mymergelist = mydepgraph.altlist()
13792                         if len(mymergelist) == 0:
13793                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13794                                 return os.EX_OK
13795                         favorites = mtimedb["resume"]["favorites"]
13796                         retval = mydepgraph.display(
13797                                 mydepgraph.altlist(reversed=tree),
13798                                 favorites=favorites)
13799                         mydepgraph.display_problems()
13800                         if retval != os.EX_OK:
13801                                 return retval
13802                 else:
13803                         retval = mydepgraph.display(
13804                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13805                                 favorites=favorites)
13806                         mydepgraph.display_problems()
13807                         if retval != os.EX_OK:
13808                                 return retval
13809                         if "--buildpkgonly" in myopts:
13810                                 graph_copy = mydepgraph.digraph.clone()
13811                                 for node in list(graph_copy.order):
13812                                         if not isinstance(node, Package):
13813                                                 graph_copy.remove(node)
13814                                 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13815                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
13816                                         print "!!! You have to merge the dependencies before you can build this package.\n"
13817                                         return 1
13818         else:
13819                 if "--buildpkgonly" in myopts:
13820                         graph_copy = mydepgraph.digraph.clone()
13821                         for node in list(graph_copy.order):
13822                                 if not isinstance(node, Package):
13823                                         graph_copy.remove(node)
13824                         if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13825                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13826                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
13827                                 return 1
13828
13829                 if ("--resume" in myopts):
13830                         favorites=mtimedb["resume"]["favorites"]
13831                         mymergelist = mydepgraph.altlist()
13832                         mydepgraph.break_refs(mymergelist)
13833                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
13834                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13835                         del mydepgraph, mymergelist
13836                         clear_caches(trees)
13837
13838                         retval = mergetask.merge()
13839                         merge_count = mergetask.curval
13840                 else:
13841                         if "resume" in mtimedb and \
13842                         "mergelist" in mtimedb["resume"] and \
13843                         len(mtimedb["resume"]["mergelist"]) > 1:
13844                                 mtimedb["resume_backup"] = mtimedb["resume"]
13845                                 del mtimedb["resume"]
13846                                 mtimedb.commit()
13847                         mtimedb["resume"]={}
13848                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
13849                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13850                         # a list type for options.
13851                         mtimedb["resume"]["myopts"] = myopts.copy()
13852
13853                         # Convert Atom instances to plain str since the mtimedb loader
13854                         # sets unpickler.find_global = None which causes unpickler.load()
13855                         # to raise the following exception:
13856                         #
13857                         # cPickle.UnpicklingError: Global and instance pickles are not supported.
13858                         #
13859                         # TODO: Maybe stop setting find_global = None, or find some other
13860                         # way to avoid accidental triggering of the above UnpicklingError.
13861                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
13862
13863                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13864                                 for pkgline in mydepgraph.altlist():
13865                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13866                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13867                                                 tmpsettings = portage.config(clone=settings)
13868                                                 edebug = 0
13869                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
13870                                                         edebug = 1
13871                                                 retval = portage.doebuild(
13872                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
13873                                                         ("--pretend" in myopts),
13874                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13875                                                         tree="porttree")
13876
13877                         pkglist = mydepgraph.altlist()
13878                         mydepgraph.saveNomergeFavorites()
13879                         mydepgraph.break_refs(pkglist)
13880                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
13881                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13882                         del mydepgraph, pkglist
13883                         clear_caches(trees)
13884
13885                         retval = mergetask.merge()
13886                         merge_count = mergetask.curval
13887
13888                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13889                         if "yes" == settings.get("AUTOCLEAN"):
13890                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13891                                 unmerge(trees[settings["ROOT"]]["root_config"],
13892                                         myopts, "clean", [],
13893                                         ldpath_mtimes, autoclean=1)
13894                         else:
13895                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13896                                         + " AUTOCLEAN is disabled.  This can cause serious"
13897                                         + " problems due to overlapping packages.\n")
13898                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
13899
13900                 return retval
13901
13902 def multiple_actions(action1, action2):
13903         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13904         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
13905         sys.exit(1)
13906
13907 def insert_optional_args(args):
13908         """
13909         Parse optional arguments and insert a value if one has
13910         not been provided. This is done before feeding the args
13911         to the optparse parser since that parser does not support
13912         this feature natively.
13913         """
13914
13915         new_args = []
13916         jobs_opts = ("-j", "--jobs")
13917         arg_stack = args[:]
13918         arg_stack.reverse()
13919         while arg_stack:
13920                 arg = arg_stack.pop()
13921
13922                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13923                 if not (short_job_opt or arg in jobs_opts):
13924                         new_args.append(arg)
13925                         continue
13926
13927                 # Insert an empty placeholder in order to
13928                 # satisfy the requirements of optparse.
13929
13930                 new_args.append("--jobs")
13931                 job_count = None
13932                 saved_opts = None
13933                 if short_job_opt and len(arg) > 2:
13934                         if arg[:2] == "-j":
13935                                 try:
13936                                         job_count = int(arg[2:])
13937                                 except ValueError:
13938                                         saved_opts = arg[2:]
13939                         else:
13940                                 job_count = "True"
13941                                 saved_opts = arg[1:].replace("j", "")
13942
13943                 if job_count is None and arg_stack:
13944                         try:
13945                                 job_count = int(arg_stack[-1])
13946                         except ValueError:
13947                                 pass
13948                         else:
13949                                 # Discard the job count from the stack
13950                                 # since we're consuming it here.
13951                                 arg_stack.pop()
13952
13953                 if job_count is None:
13954                         # unlimited number of jobs
13955                         new_args.append("True")
13956                 else:
13957                         new_args.append(str(job_count))
13958
13959                 if saved_opts is not None:
13960                         new_args.append("-" + saved_opts)
13961
13962         return new_args
13963
13964 def parse_opts(tmpcmdline, silent=False):
13965         myaction=None
13966         myopts = {}
13967         myfiles=[]
13968
13969         global actions, options, shortmapping
13970
13971         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
13972         argument_options = {
13973                 "--config-root": {
13974                         "help":"specify the location for portage configuration files",
13975                         "action":"store"
13976                 },
13977                 "--color": {
13978                         "help":"enable or disable color output",
13979                         "type":"choice",
13980                         "choices":("y", "n")
13981                 },
13982
13983                 "--jobs": {
13984
13985                         "help"   : "Specifies the number of packages to build " + \
13986                                 "simultaneously.",
13987
13988                         "action" : "store"
13989                 },
13990
13991                 "--load-average": {
13992
13993                         "help"   :"Specifies that no new builds should be started " + \
13994                                 "if there are other builds running and the load average " + \
13995                                 "is at least LOAD (a floating-point number).",
13996
13997                         "action" : "store"
13998                 },
13999
14000                 "--with-bdeps": {
14001                         "help":"include unnecessary build time dependencies",
14002                         "type":"choice",
14003                         "choices":("y", "n")
14004                 },
14005                 "--reinstall": {
14006                         "help":"specify conditions to trigger package reinstallation",
14007                         "type":"choice",
14008                         "choices":["changed-use"]
14009                 }
14010         }
14011
14012         from optparse import OptionParser
14013         parser = OptionParser()
14014         if parser.has_option("--help"):
14015                 parser.remove_option("--help")
14016
14017         for action_opt in actions:
14018                 parser.add_option("--" + action_opt, action="store_true",
14019                         dest=action_opt.replace("-", "_"), default=False)
14020         for myopt in options:
14021                 parser.add_option(myopt, action="store_true",
14022                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14023         for shortopt, longopt in shortmapping.iteritems():
14024                 parser.add_option("-" + shortopt, action="store_true",
14025                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14026         for myalias, myopt in longopt_aliases.iteritems():
14027                 parser.add_option(myalias, action="store_true",
14028                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14029
14030         for myopt, kwargs in argument_options.iteritems():
14031                 parser.add_option(myopt,
14032                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14033
14034         tmpcmdline = insert_optional_args(tmpcmdline)
14035
14036         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14037
14038         if myoptions.jobs:
14039                 jobs = None
14040                 if myoptions.jobs == "True":
14041                         jobs = True
14042                 else:
14043                         try:
14044                                 jobs = int(myoptions.jobs)
14045                         except ValueError:
14046                                 jobs = -1
14047
14048                 if jobs is not True and \
14049                         jobs < 1:
14050                         jobs = None
14051                         if not silent:
14052                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14053                                         (myoptions.jobs,), noiselevel=-1)
14054
14055                 myoptions.jobs = jobs
14056
14057         if myoptions.load_average:
14058                 try:
14059                         load_average = float(myoptions.load_average)
14060                 except ValueError:
14061                         load_average = 0.0
14062
14063                 if load_average <= 0.0:
14064                         load_average = None
14065                         if not silent:
14066                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14067                                         (myoptions.load_average,), noiselevel=-1)
14068
14069                 myoptions.load_average = load_average
14070
14071         for myopt in options:
14072                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14073                 if v:
14074                         myopts[myopt] = True
14075
14076         for myopt in argument_options:
14077                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14078                 if v is not None:
14079                         myopts[myopt] = v
14080
14081         for action_opt in actions:
14082                 v = getattr(myoptions, action_opt.replace("-", "_"))
14083                 if v:
14084                         if myaction:
14085                                 multiple_actions(myaction, action_opt)
14086                                 sys.exit(1)
14087                         myaction = action_opt
14088
14089         myfiles += myargs
14090
14091         return myaction, myopts, myfiles
14092
14093 def validate_ebuild_environment(trees):
14094         for myroot in trees:
14095                 settings = trees[myroot]["vartree"].settings
14096                 settings.validate()
14097
14098 def clear_caches(trees):
14099         for d in trees.itervalues():
14100                 d["porttree"].dbapi.melt()
14101                 d["porttree"].dbapi._aux_cache.clear()
14102                 d["bintree"].dbapi._aux_cache.clear()
14103                 d["bintree"].dbapi._clear_cache()
14104                 d["vartree"].dbapi.linkmap._clear_cache()
14105         portage.dircache.clear()
14106         gc.collect()
14107
14108 def load_emerge_config(trees=None):
14109         kwargs = {}
14110         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14111                 v = os.environ.get(envvar, None)
14112                 if v and v.strip():
14113                         kwargs[k] = v
14114         trees = portage.create_trees(trees=trees, **kwargs)
14115
14116         for root, root_trees in trees.iteritems():
14117                 settings = root_trees["vartree"].settings
14118                 setconfig = load_default_config(settings, root_trees)
14119                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14120
14121         settings = trees["/"]["vartree"].settings
14122
14123         for myroot in trees:
14124                 if myroot != "/":
14125                         settings = trees[myroot]["vartree"].settings
14126                         break
14127
14128         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14129         mtimedb = portage.MtimeDB(mtimedbfile)
14130         
14131         return settings, trees, mtimedb
14132
14133 def adjust_config(myopts, settings):
14134         """Make emerge specific adjustments to the config."""
14135
14136         # To enhance usability, make some vars case insensitive by forcing them to
14137         # lower case.
14138         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14139                 if myvar in settings:
14140                         settings[myvar] = settings[myvar].lower()
14141                         settings.backup_changes(myvar)
14142         del myvar
14143
14144         # Kill noauto as it will break merges otherwise.
14145         if "noauto" in settings.features:
14146                 while "noauto" in settings.features:
14147                         settings.features.remove("noauto")
14148                 settings["FEATURES"] = " ".join(settings.features)
14149                 settings.backup_changes("FEATURES")
14150
14151         CLEAN_DELAY = 5
14152         try:
14153                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14154         except ValueError, e:
14155                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14156                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14157                         settings["CLEAN_DELAY"], noiselevel=-1)
14158         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14159         settings.backup_changes("CLEAN_DELAY")
14160
14161         EMERGE_WARNING_DELAY = 10
14162         try:
14163                 EMERGE_WARNING_DELAY = int(settings.get(
14164                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14165         except ValueError, e:
14166                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14167                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14168                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14169         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14170         settings.backup_changes("EMERGE_WARNING_DELAY")
14171
14172         if "--quiet" in myopts:
14173                 settings["PORTAGE_QUIET"]="1"
14174                 settings.backup_changes("PORTAGE_QUIET")
14175
14176         if "--verbose" in myopts:
14177                 settings["PORTAGE_VERBOSE"] = "1"
14178                 settings.backup_changes("PORTAGE_VERBOSE")
14179
14180         # Set so that configs will be merged regardless of remembered status
14181         if ("--noconfmem" in myopts):
14182                 settings["NOCONFMEM"]="1"
14183                 settings.backup_changes("NOCONFMEM")
14184
14185         # Set various debug markers... They should be merged somehow.
14186         PORTAGE_DEBUG = 0
14187         try:
14188                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14189                 if PORTAGE_DEBUG not in (0, 1):
14190                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14191                                 PORTAGE_DEBUG, noiselevel=-1)
14192                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14193                                 noiselevel=-1)
14194                         PORTAGE_DEBUG = 0
14195         except ValueError, e:
14196                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14197                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14198                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14199                 del e
14200         if "--debug" in myopts:
14201                 PORTAGE_DEBUG = 1
14202         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14203         settings.backup_changes("PORTAGE_DEBUG")
14204
14205         if settings.get("NOCOLOR") not in ("yes","true"):
14206                 portage.output.havecolor = 1
14207
14208         """The explicit --color < y | n > option overrides the NOCOLOR environment
14209         variable and stdout auto-detection."""
14210         if "--color" in myopts:
14211                 if "y" == myopts["--color"]:
14212                         portage.output.havecolor = 1
14213                         settings["NOCOLOR"] = "false"
14214                 else:
14215                         portage.output.havecolor = 0
14216                         settings["NOCOLOR"] = "true"
14217                 settings.backup_changes("NOCOLOR")
14218         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14219                 portage.output.havecolor = 0
14220                 settings["NOCOLOR"] = "true"
14221                 settings.backup_changes("NOCOLOR")
14222
14223 def apply_priorities(settings):
14224         ionice(settings)
14225         nice(settings)
14226
14227 def nice(settings):
14228         try:
14229                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14230         except (OSError, ValueError), e:
14231                 out = portage.output.EOutput()
14232                 out.eerror("Failed to change nice value to '%s'" % \
14233                         settings["PORTAGE_NICENESS"])
14234                 out.eerror("%s\n" % str(e))
14235
14236 def ionice(settings):
14237
14238         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14239         if ionice_cmd:
14240                 ionice_cmd = shlex.split(ionice_cmd)
14241         if not ionice_cmd:
14242                 return
14243
14244         from portage.util import varexpand
14245         variables = {"PID" : str(os.getpid())}
14246         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14247
14248         try:
14249                 rval = portage.process.spawn(cmd, env=os.environ)
14250         except portage.exception.CommandNotFound:
14251                 # The OS kernel probably doesn't support ionice,
14252                 # so return silently.
14253                 return
14254
14255         if rval != os.EX_OK:
14256                 out = portage.output.EOutput()
14257                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14258                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14259
14260 def display_missing_pkg_set(root_config, set_name):
14261
14262         msg = []
14263         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14264                 "The following sets exist:") % \
14265                 colorize("INFORM", set_name))
14266         msg.append("")
14267
14268         for s in sorted(root_config.sets):
14269                 msg.append("    %s" % s)
14270         msg.append("")
14271
14272         writemsg_level("".join("%s\n" % l for l in msg),
14273                 level=logging.ERROR, noiselevel=-1)
14274
14275 def expand_set_arguments(myfiles, myaction, root_config):
14276         retval = os.EX_OK
14277         setconfig = root_config.setconfig
14278
14279         sets = setconfig.getSets()
14280
14281         # In order to know exactly which atoms/sets should be added to the
14282         # world file, the depgraph performs set expansion later. It will get
14283         # confused about where the atoms came from if it's not allowed to
14284         # expand them itself.
14285         do_not_expand = (None, )
14286         newargs = []
14287         for a in myfiles:
14288                 if a in ("system", "world"):
14289                         newargs.append(SETPREFIX+a)
14290                 else:
14291                         newargs.append(a)
14292         myfiles = newargs
14293         del newargs
14294         newargs = []
14295
14296         # separators for set arguments
14297         ARG_START = "{"
14298         ARG_END = "}"
14299
14300         # WARNING: all operators must be of equal length
14301         IS_OPERATOR = "/@"
14302         DIFF_OPERATOR = "-@"
14303         UNION_OPERATOR = "+@"
14304         
14305         for i in range(0, len(myfiles)):
14306                 if myfiles[i].startswith(SETPREFIX):
14307                         start = 0
14308                         end = 0
14309                         x = myfiles[i][len(SETPREFIX):]
14310                         newset = ""
14311                         while x:
14312                                 start = x.find(ARG_START)
14313                                 end = x.find(ARG_END)
14314                                 if start > 0 and start < end:
14315                                         namepart = x[:start]
14316                                         argpart = x[start+1:end]
14317                                 
14318                                         # TODO: implement proper quoting
14319                                         args = argpart.split(",")
14320                                         options = {}
14321                                         for a in args:
14322                                                 if "=" in a:
14323                                                         k, v  = a.split("=", 1)
14324                                                         options[k] = v
14325                                                 else:
14326                                                         options[a] = "True"
14327                                         setconfig.update(namepart, options)
14328                                         newset += (x[:start-len(namepart)]+namepart)
14329                                         x = x[end+len(ARG_END):]
14330                                 else:
14331                                         newset += x
14332                                         x = ""
14333                         myfiles[i] = SETPREFIX+newset
14334                                 
14335         sets = setconfig.getSets()
14336
14337         # display errors that occured while loading the SetConfig instance
14338         for e in setconfig.errors:
14339                 print colorize("BAD", "Error during set creation: %s" % e)
14340         
14341         # emerge relies on the existance of sets with names "world" and "system"
14342         required_sets = ("world", "system")
14343         missing_sets = []
14344
14345         for s in required_sets:
14346                 if s not in sets:
14347                         missing_sets.append(s)
14348         if missing_sets:
14349                 if len(missing_sets) > 2:
14350                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14351                         missing_sets_str += ', and "%s"' % missing_sets[-1]
14352                 elif len(missing_sets) == 2:
14353                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14354                 else:
14355                         missing_sets_str = '"%s"' % missing_sets[-1]
14356                 msg = ["emerge: incomplete set configuration, " + \
14357                         "missing set(s): %s" % missing_sets_str]
14358                 if sets:
14359                         msg.append("        sets defined: %s" % ", ".join(sets))
14360                 msg.append("        This usually means that '%s'" % \
14361                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14362                 msg.append("        is missing or corrupt.")
14363                 for line in msg:
14364                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14365                 return (None, 1)
14366         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
14367
14368         for a in myfiles:
14369                 if a.startswith(SETPREFIX):
14370                         # support simple set operations (intersection, difference and union)
14371                         # on the commandline. Expressions are evaluated strictly left-to-right
14372                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14373                                 expression = a[len(SETPREFIX):]
14374                                 expr_sets = []
14375                                 expr_ops = []
14376                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14377                                         is_pos = expression.rfind(IS_OPERATOR)
14378                                         diff_pos = expression.rfind(DIFF_OPERATOR)
14379                                         union_pos = expression.rfind(UNION_OPERATOR)
14380                                         op_pos = max(is_pos, diff_pos, union_pos)
14381                                         s1 = expression[:op_pos]
14382                                         s2 = expression[op_pos+len(IS_OPERATOR):]
14383                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14384                                         if not s2 in sets:
14385                                                 display_missing_pkg_set(root_config, s2)
14386                                                 return (None, 1)
14387                                         expr_sets.insert(0, s2)
14388                                         expr_ops.insert(0, op)
14389                                         expression = s1
14390                                 if not expression in sets:
14391                                         display_missing_pkg_set(root_config, expression)
14392                                         return (None, 1)
14393                                 expr_sets.insert(0, expression)
14394                                 result = set(setconfig.getSetAtoms(expression))
14395                                 for i in range(0, len(expr_ops)):
14396                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
14397                                         if expr_ops[i] == IS_OPERATOR:
14398                                                 result.intersection_update(s2)
14399                                         elif expr_ops[i] == DIFF_OPERATOR:
14400                                                 result.difference_update(s2)
14401                                         elif expr_ops[i] == UNION_OPERATOR:
14402                                                 result.update(s2)
14403                                         else:
14404                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14405                                 newargs.extend(result)
14406                         else:                   
14407                                 s = a[len(SETPREFIX):]
14408                                 if s not in sets:
14409                                         display_missing_pkg_set(root_config, s)
14410                                         return (None, 1)
14411                                 setconfig.active.append(s)
14412                                 try:
14413                                         set_atoms = setconfig.getSetAtoms(s)
14414                                 except portage.exception.PackageSetNotFound, e:
14415                                         writemsg_level(("emerge: the given set '%s' " + \
14416                                                 "contains a non-existent set named '%s'.\n") % \
14417                                                 (s, e), level=logging.ERROR, noiselevel=-1)
14418                                         return (None, 1)
14419                                 if myaction in unmerge_actions and \
14420                                                 not sets[s].supportsOperation("unmerge"):
14421                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
14422                                                 "not support unmerge operations\n")
14423                                         retval = 1
14424                                 elif not set_atoms:
14425                                         print "emerge: '%s' is an empty set" % s
14426                                 elif myaction not in do_not_expand:
14427                                         newargs.extend(set_atoms)
14428                                 else:
14429                                         newargs.append(SETPREFIX+s)
14430                                 for e in sets[s].errors:
14431                                         print e
14432                 else:
14433                         newargs.append(a)
14434         return (newargs, retval)
14435
14436 def repo_name_check(trees):
14437         missing_repo_names = set()
14438         for root, root_trees in trees.iteritems():
14439                 if "porttree" in root_trees:
14440                         portdb = root_trees["porttree"].dbapi
14441                         missing_repo_names.update(portdb.porttrees)
14442                         repos = portdb.getRepositories()
14443                         for r in repos:
14444                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
14445                         if portdb.porttree_root in missing_repo_names and \
14446                                 not os.path.exists(os.path.join(
14447                                 portdb.porttree_root, "profiles")):
14448                                 # This is normal if $PORTDIR happens to be empty,
14449                                 # so don't warn about it.
14450                                 missing_repo_names.remove(portdb.porttree_root)
14451
14452         if missing_repo_names:
14453                 msg = []
14454                 msg.append("WARNING: One or more repositories " + \
14455                         "have missing repo_name entries:")
14456                 msg.append("")
14457                 for p in missing_repo_names:
14458                         msg.append("\t%s/profiles/repo_name" % (p,))
14459                 msg.append("")
14460                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14461                         "should be a plain text file containing a unique " + \
14462                         "name for the repository on the first line.", 70))
14463                 writemsg_level("".join("%s\n" % l for l in msg),
14464                         level=logging.WARNING, noiselevel=-1)
14465
14466         return bool(missing_repo_names)
14467
14468 def config_protect_check(trees):
14469         for root, root_trees in trees.iteritems():
14470                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14471                         msg = "!!! CONFIG_PROTECT is empty"
14472                         if root != "/":
14473                                 msg += " for '%s'" % root
14474                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
14475
14476 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
14477
14478         if "--quiet" in myopts:
14479                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14480                 print "!!! one of the following fully-qualified ebuild names instead:\n"
14481                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14482                         print "    " + colorize("INFORM", cp)
14483                 return
14484
14485         s = search(root_config, spinner, "--searchdesc" in myopts,
14486                 "--quiet" not in myopts, "--usepkg" in myopts,
14487                 "--usepkgonly" in myopts)
14488         null_cp = portage.dep_getkey(insert_category_into_atom(
14489                 arg, "null"))
14490         cat, atom_pn = portage.catsplit(null_cp)
14491         s.searchkey = atom_pn
14492         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14493                 s.addCP(cp)
14494         s.output()
14495         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14496         print "!!! one of the above fully-qualified ebuild names instead.\n"
14497
14498 def profile_check(trees, myaction, myopts):
14499         if myaction in ("info", "sync"):
14500                 return os.EX_OK
14501         elif "--version" in myopts or "--help" in myopts:
14502                 return os.EX_OK
14503         for root, root_trees in trees.iteritems():
14504                 if root_trees["root_config"].settings.profiles:
14505                         continue
14506                 # generate some profile related warning messages
14507                 validate_ebuild_environment(trees)
14508                 msg = "If you have just changed your profile configuration, you " + \
14509                         "should revert back to the previous configuration. Due to " + \
14510                         "your current profile being invalid, allowed actions are " + \
14511                         "limited to --help, --info, --sync, and --version."
14512                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14513                         level=logging.ERROR, noiselevel=-1)
14514                 return 1
14515         return os.EX_OK
14516
14517 def emerge_main():
14518         global portage  # NFC why this is necessary now - genone
14519         portage._disable_legacy_globals()
14520         # Disable color until we're sure that it should be enabled (after
14521         # EMERGE_DEFAULT_OPTS has been parsed).
14522         portage.output.havecolor = 0
14523         # This first pass is just for options that need to be known as early as
14524         # possible, such as --config-root.  They will be parsed again later,
14525         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14526         # the value of --config-root).
14527         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14528         if "--debug" in myopts:
14529                 os.environ["PORTAGE_DEBUG"] = "1"
14530         if "--config-root" in myopts:
14531                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14532
14533         # Portage needs to ensure a sane umask for the files it creates.
14534         os.umask(022)
14535         settings, trees, mtimedb = load_emerge_config()
14536         portdb = trees[settings["ROOT"]]["porttree"].dbapi
14537         rval = profile_check(trees, myaction, myopts)
14538         if rval != os.EX_OK:
14539                 return rval
14540
14541         if portage._global_updates(trees, mtimedb["updates"]):
14542                 mtimedb.commit()
14543                 # Reload the whole config from scratch.
14544                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14545                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14546
14547         xterm_titles = "notitles" not in settings.features
14548
14549         tmpcmdline = []
14550         if "--ignore-default-opts" not in myopts:
14551                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14552         tmpcmdline.extend(sys.argv[1:])
14553         myaction, myopts, myfiles = parse_opts(tmpcmdline)
14554
14555         if "--digest" in myopts:
14556                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14557                 # Reload the whole config from scratch so that the portdbapi internal
14558                 # config is updated with new FEATURES.
14559                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14560                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14561
14562         for myroot in trees:
14563                 mysettings =  trees[myroot]["vartree"].settings
14564                 mysettings.unlock()
14565                 adjust_config(myopts, mysettings)
14566                 mysettings["PORTAGE_COUNTER_HASH"] = \
14567                         trees[myroot]["vartree"].dbapi._counter_hash()
14568                 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14569                 mysettings.lock()
14570                 del myroot, mysettings
14571
14572         apply_priorities(settings)
14573
14574         spinner = stdout_spinner()
14575         if "candy" in settings.features:
14576                 spinner.update = spinner.update_scroll
14577
14578         if "--quiet" not in myopts:
14579                 portage.deprecated_profile_check(settings=settings)
14580                 repo_name_check(trees)
14581                 config_protect_check(trees)
14582
14583         eclasses_overridden = {}
14584         for mytrees in trees.itervalues():
14585                 mydb = mytrees["porttree"].dbapi
14586                 # Freeze the portdbapi for performance (memoize all xmatch results).
14587                 mydb.freeze()
14588                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14589         del mytrees, mydb
14590
14591         if eclasses_overridden and \
14592                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14593                 prefix = bad(" * ")
14594                 if len(eclasses_overridden) == 1:
14595                         writemsg(prefix + "Overlay eclass overrides " + \
14596                                 "eclass from PORTDIR:\n", noiselevel=-1)
14597                 else:
14598                         writemsg(prefix + "Overlay eclasses override " + \
14599                                 "eclasses from PORTDIR:\n", noiselevel=-1)
14600                 writemsg(prefix + "\n", noiselevel=-1)
14601                 for eclass_name in sorted(eclasses_overridden):
14602                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
14603                                 (eclasses_overridden[eclass_name], eclass_name),
14604                                 noiselevel=-1)
14605                 writemsg(prefix + "\n", noiselevel=-1)
14606                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14607                 "because it will trigger invalidation of cached ebuild metadata " + \
14608                 "that is distributed with the portage tree. If you must " + \
14609                 "override eclasses from PORTDIR then you are advised to add " + \
14610                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14611                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14612                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14613                 "you would like to disable this warning."
14614                 from textwrap import wrap
14615                 for line in wrap(msg, 72):
14616                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14617
14618         if "moo" in myfiles:
14619                 print """
14620
14621   Larry loves Gentoo (""" + platform.system() + """)
14622
14623  _______________________
14624 < Have you mooed today? >
14625  -----------------------
14626         \   ^__^
14627          \  (oo)\_______
14628             (__)\       )\/\ 
14629                 ||----w |
14630                 ||     ||
14631
14632 """
14633
14634         for x in myfiles:
14635                 ext = os.path.splitext(x)[1]
14636                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14637                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14638                         break
14639
14640         root_config = trees[settings["ROOT"]]["root_config"]
14641         if myaction == "list-sets":
14642                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14643                 sys.stdout.flush()
14644                 return os.EX_OK
14645
14646         # only expand sets for actions taking package arguments
14647         oldargs = myfiles[:]
14648         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14649                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14650                 if retval != os.EX_OK:
14651                         return retval
14652
14653                 # Need to handle empty sets specially, otherwise emerge will react 
14654                 # with the help message for empty argument lists
14655                 if oldargs and not myfiles:
14656                         print "emerge: no targets left after set expansion"
14657                         return 0
14658
14659         if ("--tree" in myopts) and ("--columns" in myopts):
14660                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14661                 return 1
14662
14663         if ("--quiet" in myopts):
14664                 spinner.update = spinner.update_quiet
14665                 portage.util.noiselimit = -1
14666
14667         # Always create packages if FEATURES=buildpkg
14668         # Imply --buildpkg if --buildpkgonly
14669         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14670                 if "--buildpkg" not in myopts:
14671                         myopts["--buildpkg"] = True
14672
14673         # Also allow -S to invoke search action (-sS)
14674         if ("--searchdesc" in myopts):
14675                 if myaction and myaction != "search":
14676                         myfiles.append(myaction)
14677                 if "--search" not in myopts:
14678                         myopts["--search"] = True
14679                 myaction = "search"
14680
14681         # Always try and fetch binary packages if FEATURES=getbinpkg
14682         if ("getbinpkg" in settings.features):
14683                 myopts["--getbinpkg"] = True
14684
14685         if "--buildpkgonly" in myopts:
14686                 # --buildpkgonly will not merge anything, so
14687                 # it cancels all binary package options.
14688                 for opt in ("--getbinpkg", "--getbinpkgonly",
14689                         "--usepkg", "--usepkgonly"):
14690                         myopts.pop(opt, None)
14691
14692         if "--fetch-all-uri" in myopts:
14693                 myopts["--fetchonly"] = True
14694
14695         if "--skipfirst" in myopts and "--resume" not in myopts:
14696                 myopts["--resume"] = True
14697
14698         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14699                 myopts["--usepkgonly"] = True
14700
14701         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14702                 myopts["--getbinpkg"] = True
14703
14704         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14705                 myopts["--usepkg"] = True
14706
14707         # Also allow -K to apply --usepkg/-k
14708         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14709                 myopts["--usepkg"] = True
14710
14711         # Allow -p to remove --ask
14712         if ("--pretend" in myopts) and ("--ask" in myopts):
14713                 print ">>> --pretend disables --ask... removing --ask from options."
14714                 del myopts["--ask"]
14715
14716         # forbid --ask when not in a terminal
14717         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14718         if ("--ask" in myopts) and (not sys.stdin.isatty()):
14719                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14720                         noiselevel=-1)
14721                 return 1
14722
14723         if settings.get("PORTAGE_DEBUG", "") == "1":
14724                 spinner.update = spinner.update_quiet
14725                 portage.debug=1
14726                 if "python-trace" in settings.features:
14727                         import portage.debug
14728                         portage.debug.set_trace(True)
14729
14730         if not ("--quiet" in myopts):
14731                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14732                         spinner.update = spinner.update_basic
14733
14734         if "--version" in myopts:
14735                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14736                         settings.profile_path, settings["CHOST"],
14737                         trees[settings["ROOT"]]["vartree"].dbapi)
14738                 return 0
14739         elif "--help" in myopts:
14740                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14741                 return 0
14742
14743         if "--debug" in myopts:
14744                 print "myaction", myaction
14745                 print "myopts", myopts
14746
14747         if not myaction and not myfiles and "--resume" not in myopts:
14748                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14749                 return 1
14750
14751         pretend = "--pretend" in myopts
14752         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14753         buildpkgonly = "--buildpkgonly" in myopts
14754
14755         # check if root user is the current user for the actions where emerge needs this
14756         if portage.secpass < 2:
14757                 # We've already allowed "--version" and "--help" above.
14758                 if "--pretend" not in myopts and myaction not in ("search","info"):
14759                         need_superuser = not \
14760                                 (fetchonly or \
14761                                 (buildpkgonly and secpass >= 1) or \
14762                                 myaction in ("metadata", "regen") or \
14763                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14764                         if portage.secpass < 1 or \
14765                                 need_superuser:
14766                                 if need_superuser:
14767                                         access_desc = "superuser"
14768                                 else:
14769                                         access_desc = "portage group"
14770                                 # Always show portage_group_warning() when only portage group
14771                                 # access is required but the user is not in the portage group.
14772                                 from portage.data import portage_group_warning
14773                                 if "--ask" in myopts:
14774                                         myopts["--pretend"] = True
14775                                         del myopts["--ask"]
14776                                         print ("%s access is required... " + \
14777                                                 "adding --pretend to options.\n") % access_desc
14778                                         if portage.secpass < 1 and not need_superuser:
14779                                                 portage_group_warning()
14780                                 else:
14781                                         sys.stderr.write(("emerge: %s access is " + \
14782                                                 "required.\n\n") % access_desc)
14783                                         if portage.secpass < 1 and not need_superuser:
14784                                                 portage_group_warning()
14785                                         return 1
14786
14787         disable_emergelog = False
14788         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14789                 if x in myopts:
14790                         disable_emergelog = True
14791                         break
14792         if myaction in ("search", "info"):
14793                 disable_emergelog = True
14794         if disable_emergelog:
14795                 """ Disable emergelog for everything except build or unmerge
14796                 operations.  This helps minimize parallel emerge.log entries that can
14797                 confuse log parsers.  We especially want it disabled during
14798                 parallel-fetch, which uses --resume --fetchonly."""
14799                 global emergelog
14800                 def emergelog(*pargs, **kargs):
14801                         pass
14802
14803         if not "--pretend" in myopts:
14804                 emergelog(xterm_titles, "Started emerge on: "+\
14805                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14806                 myelogstr=""
14807                 if myopts:
14808                         myelogstr=" ".join(myopts)
14809                 if myaction:
14810                         myelogstr+=" "+myaction
14811                 if myfiles:
14812                         myelogstr += " " + " ".join(oldargs)
14813                 emergelog(xterm_titles, " *** emerge " + myelogstr)
14814         del oldargs
14815
14816         def emergeexitsig(signum, frame):
14817                 signal.signal(signal.SIGINT, signal.SIG_IGN)
14818                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14819                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
14820                 sys.exit(100+signum)
14821         signal.signal(signal.SIGINT, emergeexitsig)
14822         signal.signal(signal.SIGTERM, emergeexitsig)
14823
14824         def emergeexit():
14825                 """This gets out final log message in before we quit."""
14826                 if "--pretend" not in myopts:
14827                         emergelog(xterm_titles, " *** terminating.")
14828                 if "notitles" not in settings.features:
14829                         xtermTitleReset()
14830         portage.atexit_register(emergeexit)
14831
14832         if myaction in ("config", "metadata", "regen", "sync"):
14833                 if "--pretend" in myopts:
14834                         sys.stderr.write(("emerge: The '%s' action does " + \
14835                                 "not support '--pretend'.\n") % myaction)
14836                         return 1
14837
14838         if "sync" == myaction:
14839                 return action_sync(settings, trees, mtimedb, myopts, myaction)
14840         elif "metadata" == myaction:
14841                 action_metadata(settings, portdb, myopts)
14842         elif myaction=="regen":
14843                 validate_ebuild_environment(trees)
14844                 action_regen(settings, portdb, myopts.get("--jobs"),
14845                         myopts.get("--load-average"))
14846         # HELP action
14847         elif "config"==myaction:
14848                 validate_ebuild_environment(trees)
14849                 action_config(settings, trees, myopts, myfiles)
14850
14851         # SEARCH action
14852         elif "search"==myaction:
14853                 validate_ebuild_environment(trees)
14854                 action_search(trees[settings["ROOT"]]["root_config"],
14855                         myopts, myfiles, spinner)
14856         elif myaction in ("clean", "unmerge") or \
14857                 (myaction == "prune" and "--nodeps" in myopts):
14858                 validate_ebuild_environment(trees)
14859
14860                 # Ensure atoms are valid before calling unmerge().
14861                 # For backward compat, leading '=' is not required.
14862                 for x in myfiles:
14863                         if is_valid_package_atom(x) or \
14864                                 is_valid_package_atom("=" + x):
14865                                 continue
14866                         msg = []
14867                         msg.append("'%s' is not a valid package atom." % (x,))
14868                         msg.append("Please check ebuild(5) for full details.")
14869                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14870                                 level=logging.ERROR, noiselevel=-1)
14871                         return 1
14872
14873                 # When given a list of atoms, unmerge
14874                 # them in the order given.
14875                 ordered = myaction == "unmerge"
14876                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14877                         mtimedb["ldpath"], ordered=ordered):
14878                         if not (buildpkgonly or fetchonly or pretend):
14879                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14880
14881         elif myaction in ("depclean", "info", "prune"):
14882
14883                 # Ensure atoms are valid before calling unmerge().
14884                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14885                 valid_atoms = []
14886                 for x in myfiles:
14887                         if is_valid_package_atom(x):
14888                                 try:
14889                                         valid_atoms.append(
14890                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
14891                                 except portage.exception.AmbiguousPackageName, e:
14892                                         msg = "The short ebuild name \"" + x + \
14893                                                 "\" is ambiguous.  Please specify " + \
14894                                                 "one of the following " + \
14895                                                 "fully-qualified ebuild names instead:"
14896                                         for line in textwrap.wrap(msg, 70):
14897                                                 writemsg_level("!!! %s\n" % (line,),
14898                                                         level=logging.ERROR, noiselevel=-1)
14899                                         for i in e[0]:
14900                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
14901                                                         level=logging.ERROR, noiselevel=-1)
14902                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14903                                         return 1
14904                                 continue
14905                         msg = []
14906                         msg.append("'%s' is not a valid package atom." % (x,))
14907                         msg.append("Please check ebuild(5) for full details.")
14908                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14909                                 level=logging.ERROR, noiselevel=-1)
14910                         return 1
14911
14912                 if myaction == "info":
14913                         return action_info(settings, trees, myopts, valid_atoms)
14914
14915                 validate_ebuild_environment(trees)
14916                 action_depclean(settings, trees, mtimedb["ldpath"],
14917                         myopts, myaction, valid_atoms, spinner)
14918                 if not (buildpkgonly or fetchonly or pretend):
14919                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14920         # "update", "system", or just process files:
14921         else:
14922                 validate_ebuild_environment(trees)
14923                 if "--pretend" not in myopts:
14924                         display_news_notification(root_config, myopts)
14925                 retval = action_build(settings, trees, mtimedb,
14926                         myopts, myaction, myfiles, spinner)
14927                 root_config = trees[settings["ROOT"]]["root_config"]
14928                 post_emerge(root_config, myopts, mtimedb, retval)
14929
14930                 return retval