In traversing deps to add to Scheduler._unsatisfied_system_deps, only traverse
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for a response
147         which is checked against the responses and the first to match is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",      "--version"
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of if it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virual matches unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtuals
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
1158 class FakeVartree(portage.vartree):
1159         """This is implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do it's
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Intialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. This can raise an InvalidDependString
1383         exception if LICENSE is invalid.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1391                 if not pkgsettings._accept_chost(pkg):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1419                 if not pkgsettings._accept_chost(pkg):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439         if metadata is None:
1440                 mreasons = ["corruption"]
1441         else:
1442                 pkg = Package(type_name=pkg_type, root_config=root_config,
1443                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1444                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1445         return metadata, mreasons
1446
1447 def show_masked_packages(masked_packages):
1448         shown_licenses = set()
1449         shown_comments = set()
1450         # Maybe there is both an ebuild and a binary. Only
1451         # show one of them to avoid redundant appearance.
1452         shown_cpvs = set()
1453         have_eapi_mask = False
1454         for (root_config, pkgsettings, cpv,
1455                 metadata, mreasons) in masked_packages:
1456                 if cpv in shown_cpvs:
1457                         continue
1458                 shown_cpvs.add(cpv)
1459                 comment, filename = None, None
1460                 if "package.mask" in mreasons:
1461                         comment, filename = \
1462                                 portage.getmaskingreason(
1463                                 cpv, metadata=metadata,
1464                                 settings=pkgsettings,
1465                                 portdb=root_config.trees["porttree"].dbapi,
1466                                 return_location=True)
1467                 missing_licenses = []
1468                 if metadata:
1469                         if not portage.eapi_is_supported(metadata["EAPI"]):
1470                                 have_eapi_mask = True
1471                         try:
1472                                 missing_licenses = \
1473                                         pkgsettings._getMissingLicenses(
1474                                                 cpv, metadata)
1475                         except portage.exception.InvalidDependString:
1476                                 # This will have already been reported
1477                                 # above via mreasons.
1478                                 pass
1479
1480                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1481                 if comment and comment not in shown_comments:
1482                         print filename+":"
1483                         print comment
1484                         shown_comments.add(comment)
1485                 portdb = root_config.trees["porttree"].dbapi
1486                 for l in missing_licenses:
1487                         l_path = portdb.findLicensePath(l)
1488                         if l in shown_licenses:
1489                                 continue
1490                         msg = ("A copy of the '%s' license" + \
1491                         " is located at '%s'.") % (l, l_path)
1492                         print msg
1493                         print
1494                         shown_licenses.add(l)
1495         return have_eapi_mask
1496
1497 class Task(SlotObject):
1498         __slots__ = ("_hash_key", "_hash_value")
1499
1500         def _get_hash_key(self):
1501                 hash_key = getattr(self, "_hash_key", None)
1502                 if hash_key is None:
1503                         raise NotImplementedError(self)
1504                 return hash_key
1505
1506         def __eq__(self, other):
1507                 return self._get_hash_key() == other
1508
1509         def __ne__(self, other):
1510                 return self._get_hash_key() != other
1511
1512         def __hash__(self):
1513                 hash_value = getattr(self, "_hash_value", None)
1514                 if hash_value is None:
1515                         self._hash_value = hash(self._get_hash_key())
1516                 return self._hash_value
1517
1518         def __len__(self):
1519                 return len(self._get_hash_key())
1520
1521         def __getitem__(self, key):
1522                 return self._get_hash_key()[key]
1523
1524         def __iter__(self):
1525                 return iter(self._get_hash_key())
1526
1527         def __contains__(self, key):
1528                 return key in self._get_hash_key()
1529
1530         def __str__(self):
1531                 return str(self._get_hash_key())
1532
1533 class Blocker(Task):
1534
1535         __hash__ = Task.__hash__
1536         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1537
1538         def __init__(self, **kwargs):
1539                 Task.__init__(self, **kwargs)
1540                 self.cp = portage.dep_getkey(self.atom)
1541
1542         def _get_hash_key(self):
1543                 hash_key = getattr(self, "_hash_key", None)
1544                 if hash_key is None:
1545                         self._hash_key = \
1546                                 ("blocks", self.root, self.atom, self.eapi)
1547                 return self._hash_key
1548
1549 class Package(Task):
1550
1551         __hash__ = Task.__hash__
1552         __slots__ = ("built", "cpv", "depth",
1553                 "installed", "metadata", "onlydeps", "operation",
1554                 "root_config", "type_name",
1555                 "category", "counter", "cp", "cpv_split",
1556                 "inherited", "iuse", "mtime",
1557                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1558
1559         metadata_keys = [
1560                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1561                 "INHERITED", "IUSE", "KEYWORDS",
1562                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1563                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1564
1565         def __init__(self, **kwargs):
1566                 Task.__init__(self, **kwargs)
1567                 self.root = self.root_config.root
1568                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1569                 self.cp = portage.cpv_getkey(self.cpv)
1570                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1571                 self.category, self.pf = portage.catsplit(self.cpv)
1572                 self.cpv_split = portage.catpkgsplit(self.cpv)
1573                 self.pv_split = self.cpv_split[1:]
1574
1575         class _use(object):
1576
1577                 __slots__ = ("__weakref__", "enabled")
1578
1579                 def __init__(self, use):
1580                         self.enabled = frozenset(use)
1581
1582         class _iuse(object):
1583
1584                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1585
1586                 def __init__(self, tokens, iuse_implicit):
1587                         self.tokens = tuple(tokens)
1588                         self.iuse_implicit = iuse_implicit
1589                         enabled = []
1590                         disabled = []
1591                         other = []
1592                         for x in tokens:
1593                                 prefix = x[:1]
1594                                 if prefix == "+":
1595                                         enabled.append(x[1:])
1596                                 elif prefix == "-":
1597                                         disabled.append(x[1:])
1598                                 else:
1599                                         other.append(x)
1600                         self.enabled = frozenset(enabled)
1601                         self.disabled = frozenset(disabled)
1602                         self.all = frozenset(chain(enabled, disabled, other))
1603
1604                 def __getattribute__(self, name):
1605                         if name == "regex":
1606                                 try:
1607                                         return object.__getattribute__(self, "regex")
1608                                 except AttributeError:
1609                                         all = object.__getattribute__(self, "all")
1610                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1611                                         # Escape anything except ".*" which is supposed
1612                                         # to pass through from _get_implicit_iuse()
1613                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1614                                         regex = "^(%s)$" % "|".join(regex)
1615                                         regex = regex.replace("\\.\\*", ".*")
1616                                         self.regex = re.compile(regex)
1617                         return object.__getattribute__(self, name)
1618
1619         def _get_hash_key(self):
1620                 hash_key = getattr(self, "_hash_key", None)
1621                 if hash_key is None:
1622                         if self.operation is None:
1623                                 self.operation = "merge"
1624                                 if self.onlydeps or self.installed:
1625                                         self.operation = "nomerge"
1626                         self._hash_key = \
1627                                 (self.type_name, self.root, self.cpv, self.operation)
1628                 return self._hash_key
1629
1630         def __lt__(self, other):
1631                 if other.cp != self.cp:
1632                         return False
1633                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1634                         return True
1635                 return False
1636
1637         def __le__(self, other):
1638                 if other.cp != self.cp:
1639                         return False
1640                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1641                         return True
1642                 return False
1643
1644         def __gt__(self, other):
1645                 if other.cp != self.cp:
1646                         return False
1647                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1648                         return True
1649                 return False
1650
1651         def __ge__(self, other):
1652                 if other.cp != self.cp:
1653                         return False
1654                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1655                         return True
1656                 return False
1657
1658 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1659         if not x.startswith("UNUSED_"))
1660 _all_metadata_keys.discard("CDEPEND")
1661 _all_metadata_keys.update(Package.metadata_keys)
1662
1663 from portage.cache.mappings import slot_dict_class
1664 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1665
1666 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1667         """
1668         Detect metadata updates and synchronize Package attributes.
1669         """
1670
1671         __slots__ = ("_pkg",)
1672         _wrapped_keys = frozenset(
1673                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1674
1675         def __init__(self, pkg, metadata):
1676                 _PackageMetadataWrapperBase.__init__(self)
1677                 self._pkg = pkg
1678                 self.update(metadata)
1679
1680         def __setitem__(self, k, v):
1681                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1682                 if k in self._wrapped_keys:
1683                         getattr(self, "_set_" + k.lower())(k, v)
1684
1685         def _set_inherited(self, k, v):
1686                 if isinstance(v, basestring):
1687                         v = frozenset(v.split())
1688                 self._pkg.inherited = v
1689
1690         def _set_iuse(self, k, v):
1691                 self._pkg.iuse = self._pkg._iuse(
1692                         v.split(), self._pkg.root_config.iuse_implicit)
1693
1694         def _set_slot(self, k, v):
1695                 self._pkg.slot = v
1696
1697         def _set_use(self, k, v):
1698                 self._pkg.use = self._pkg._use(v.split())
1699
1700         def _set_counter(self, k, v):
1701                 if isinstance(v, basestring):
1702                         try:
1703                                 v = long(v.strip())
1704                         except ValueError:
1705                                 v = 0
1706                 self._pkg.counter = v
1707
1708         def _set__mtime_(self, k, v):
1709                 if isinstance(v, basestring):
1710                         try:
1711                                 v = long(v.strip())
1712                         except ValueError:
1713                                 v = 0
1714                 self._pkg.mtime = v
1715
1716 class EbuildFetchonly(SlotObject):
1717
1718         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1719
1720         def execute(self):
1721                 settings = self.settings
1722                 pkg = self.pkg
1723                 portdb = pkg.root_config.trees["porttree"].dbapi
1724                 ebuild_path = portdb.findname(pkg.cpv)
1725                 settings.setcpv(pkg)
1726                 debug = settings.get("PORTAGE_DEBUG") == "1"
1727                 use_cache = 1 # always true
1728                 portage.doebuild_environment(ebuild_path, "fetch",
1729                         settings["ROOT"], settings, debug, use_cache, portdb)
1730                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1731
1732                 if restrict_fetch:
1733                         rval = self._execute_with_builddir()
1734                 else:
1735                         rval = portage.doebuild(ebuild_path, "fetch",
1736                                 settings["ROOT"], settings, debug=debug,
1737                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1738                                 mydbapi=portdb, tree="porttree")
1739
1740                         if rval != os.EX_OK:
1741                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1742                                 eerror(msg, phase="unpack", key=pkg.cpv)
1743
1744                 return rval
1745
1746         def _execute_with_builddir(self):
1747                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1748                 # ensuring sane $PWD (bug #239560) and storing elog
1749                 # messages. Use a private temp directory, in order
1750                 # to avoid locking the main one.
1751                 settings = self.settings
1752                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1753                 from tempfile import mkdtemp
1754                 try:
1755                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1756                 except OSError, e:
1757                         if e.errno != portage.exception.PermissionDenied.errno:
1758                                 raise
1759                         raise portage.exception.PermissionDenied(global_tmpdir)
1760                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1761                 settings.backup_changes("PORTAGE_TMPDIR")
1762                 try:
1763                         retval = self._execute()
1764                 finally:
1765                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1766                         settings.backup_changes("PORTAGE_TMPDIR")
1767                         shutil.rmtree(private_tmpdir)
1768                 return retval
1769
1770         def _execute(self):
1771                 settings = self.settings
1772                 pkg = self.pkg
1773                 root_config = pkg.root_config
1774                 portdb = root_config.trees["porttree"].dbapi
1775                 ebuild_path = portdb.findname(pkg.cpv)
1776                 debug = settings.get("PORTAGE_DEBUG") == "1"
1777                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1778
1779                 retval = portage.doebuild(ebuild_path, "fetch",
1780                         self.settings["ROOT"], self.settings, debug=debug,
1781                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1782                         mydbapi=portdb, tree="porttree")
1783
1784                 if retval != os.EX_OK:
1785                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1786                         eerror(msg, phase="unpack", key=pkg.cpv)
1787
1788                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1789                 return retval
1790
1791 class PollConstants(object):
1792
1793         """
1794         Provides POLL* constants that are equivalent to those from the
1795         select module, for use by PollSelectAdapter.
1796         """
1797
1798         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1799         v = 1
1800         for k in names:
1801                 locals()[k] = getattr(select, k, v)
1802                 v *= 2
1803         del k, v
1804
1805 class AsynchronousTask(SlotObject):
1806         """
1807         Subclasses override _wait() and _poll() so that calls
1808         to public methods can be wrapped for implementing
1809         hooks such as exit listener notification.
1810
1811         Sublasses should call self.wait() to notify exit listeners after
1812         the task is complete and self.returncode has been set.
1813         """
1814
1815         __slots__ = ("background", "cancelled", "returncode") + \
1816                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1817
1818         def start(self):
1819                 """
1820                 Start an asynchronous task and then return as soon as possible.
1821                 """
1822                 self._start_hook()
1823                 self._start()
1824
1825         def _start(self):
1826                 raise NotImplementedError(self)
1827
1828         def isAlive(self):
1829                 return self.returncode is None
1830
1831         def poll(self):
1832                 self._wait_hook()
1833                 return self._poll()
1834
1835         def _poll(self):
1836                 return self.returncode
1837
1838         def wait(self):
1839                 if self.returncode is None:
1840                         self._wait()
1841                 self._wait_hook()
1842                 return self.returncode
1843
1844         def _wait(self):
1845                 return self.returncode
1846
1847         def cancel(self):
1848                 self.cancelled = True
1849                 self.wait()
1850
1851         def addStartListener(self, f):
1852                 """
1853                 The function will be called with one argument, a reference to self.
1854                 """
1855                 if self._start_listeners is None:
1856                         self._start_listeners = []
1857                 self._start_listeners.append(f)
1858
1859         def removeStartListener(self, f):
1860                 if self._start_listeners is None:
1861                         return
1862                 self._start_listeners.remove(f)
1863
1864         def _start_hook(self):
1865                 if self._start_listeners is not None:
1866                         start_listeners = self._start_listeners
1867                         self._start_listeners = None
1868
1869                         for f in start_listeners:
1870                                 f(self)
1871
1872         def addExitListener(self, f):
1873                 """
1874                 The function will be called with one argument, a reference to self.
1875                 """
1876                 if self._exit_listeners is None:
1877                         self._exit_listeners = []
1878                 self._exit_listeners.append(f)
1879
1880         def removeExitListener(self, f):
1881                 if self._exit_listeners is None:
1882                         if self._exit_listener_stack is not None:
1883                                 self._exit_listener_stack.remove(f)
1884                         return
1885                 self._exit_listeners.remove(f)
1886
1887         def _wait_hook(self):
1888                 """
1889                 Call this method after the task completes, just before returning
1890                 the returncode from wait() or poll(). This hook is
1891                 used to trigger exit listeners when the returncode first
1892                 becomes available.
1893                 """
1894                 if self.returncode is not None and \
1895                         self._exit_listeners is not None:
1896
1897                         # This prevents recursion, in case one of the
1898                         # exit handlers triggers this method again by
1899                         # calling wait(). Use a stack that gives
1900                         # removeExitListener() an opportunity to consume
1901                         # listeners from the stack, before they can get
1902                         # called below. This is necessary because a call
1903                         # to one exit listener may result in a call to
1904                         # removeExitListener() for another listener on
1905                         # the stack. That listener needs to be removed
1906                         # from the stack since it would be inconsistent
1907                         # to call it after it has been been passed into
1908                         # removeExitListener().
1909                         self._exit_listener_stack = self._exit_listeners
1910                         self._exit_listeners = None
1911
1912                         self._exit_listener_stack.reverse()
1913                         while self._exit_listener_stack:
1914                                 self._exit_listener_stack.pop()(self)
1915
1916 class AbstractPollTask(AsynchronousTask):
1917
1918         __slots__ = ("scheduler",) + \
1919                 ("_registered",)
1920
1921         _bufsize = 4096
1922         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1923         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1924                 _exceptional_events
1925
1926         def _unregister(self):
1927                 raise NotImplementedError(self)
1928
1929         def _unregister_if_appropriate(self, event):
1930                 if self._registered:
1931                         if event & self._exceptional_events:
1932                                 self._unregister()
1933                                 self.cancel()
1934                         elif event & PollConstants.POLLHUP:
1935                                 self._unregister()
1936                                 self.wait()
1937
1938 class PipeReader(AbstractPollTask):
1939
1940         """
1941         Reads output from one or more files and saves it in memory,
1942         for retrieval via the getvalue() method. This is driven by
1943         the scheduler's poll() loop, so it runs entirely within the
1944         current process.
1945         """
1946
1947         __slots__ = ("input_files",) + \
1948                 ("_read_data", "_reg_ids")
1949
1950         def _start(self):
1951                 self._reg_ids = set()
1952                 self._read_data = []
1953                 for k, f in self.input_files.iteritems():
1954                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1955                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1956                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1957                                 self._registered_events, self._output_handler))
1958                 self._registered = True
1959
1960         def isAlive(self):
1961                 return self._registered
1962
1963         def cancel(self):
1964                 if self.returncode is None:
1965                         self.returncode = 1
1966                         self.cancelled = True
1967                 self.wait()
1968
1969         def _wait(self):
1970                 if self.returncode is not None:
1971                         return self.returncode
1972
1973                 if self._registered:
1974                         self.scheduler.schedule(self._reg_ids)
1975                         self._unregister()
1976
1977                 self.returncode = os.EX_OK
1978                 return self.returncode
1979
1980         def getvalue(self):
1981                 """Retrieve the entire contents"""
1982                 return "".join(self._read_data)
1983
1984         def close(self):
1985                 """Free the memory buffer."""
1986                 self._read_data = None
1987
1988         def _output_handler(self, fd, event):
1989
1990                 if event & PollConstants.POLLIN:
1991
1992                         for f in self.input_files.itervalues():
1993                                 if fd == f.fileno():
1994                                         break
1995
1996                         buf = array.array('B')
1997                         try:
1998                                 buf.fromfile(f, self._bufsize)
1999                         except EOFError:
2000                                 pass
2001
2002                         if buf:
2003                                 self._read_data.append(buf.tostring())
2004                         else:
2005                                 self._unregister()
2006                                 self.wait()
2007
2008                 self._unregister_if_appropriate(event)
2009                 return self._registered
2010
2011         def _unregister(self):
2012                 """
2013                 Unregister from the scheduler and close open files.
2014                 """
2015
2016                 self._registered = False
2017
2018                 if self._reg_ids is not None:
2019                         for reg_id in self._reg_ids:
2020                                 self.scheduler.unregister(reg_id)
2021                         self._reg_ids = None
2022
2023                 if self.input_files is not None:
2024                         for f in self.input_files.itervalues():
2025                                 f.close()
2026                         self.input_files = None
2027
2028 class CompositeTask(AsynchronousTask):
2029
2030         __slots__ = ("scheduler",) + ("_current_task",)
2031
2032         def isAlive(self):
2033                 return self._current_task is not None
2034
2035         def cancel(self):
2036                 self.cancelled = True
2037                 if self._current_task is not None:
2038                         self._current_task.cancel()
2039
2040         def _poll(self):
2041                 """
2042                 This does a loop calling self._current_task.poll()
2043                 repeatedly as long as the value of self._current_task
2044                 keeps changing. It calls poll() a maximum of one time
2045                 for a given self._current_task instance. This is useful
2046                 since calling poll() on a task can trigger advance to
2047                 the next task could eventually lead to the returncode
2048                 being set in cases when polling only a single task would
2049                 not have the same effect.
2050                 """
2051
2052                 prev = None
2053                 while True:
2054                         task = self._current_task
2055                         if task is None or task is prev:
2056                                 # don't poll the same task more than once
2057                                 break
2058                         task.poll()
2059                         prev = task
2060
2061                 return self.returncode
2062
2063         def _wait(self):
2064
2065                 prev = None
2066                 while True:
2067                         task = self._current_task
2068                         if task is None:
2069                                 # don't wait for the same task more than once
2070                                 break
2071                         if task is prev:
2072                                 # Before the task.wait() method returned, an exit
2073                                 # listener should have set self._current_task to either
2074                                 # a different task or None. Something is wrong.
2075                                 raise AssertionError("self._current_task has not " + \
2076                                         "changed since calling wait", self, task)
2077                         task.wait()
2078                         prev = task
2079
2080                 return self.returncode
2081
2082         def _assert_current(self, task):
2083                 """
2084                 Raises an AssertionError if the given task is not the
2085                 same one as self._current_task. This can be useful
2086                 for detecting bugs.
2087                 """
2088                 if task is not self._current_task:
2089                         raise AssertionError("Unrecognized task: %s" % (task,))
2090
2091         def _default_exit(self, task):
2092                 """
2093                 Calls _assert_current() on the given task and then sets the
2094                 composite returncode attribute if task.returncode != os.EX_OK.
2095                 If the task failed then self._current_task will be set to None.
2096                 Subclasses can use this as a generic task exit callback.
2097
2098                 @rtype: int
2099                 @returns: The task.returncode attribute.
2100                 """
2101                 self._assert_current(task)
2102                 if task.returncode != os.EX_OK:
2103                         self.returncode = task.returncode
2104                         self._current_task = None
2105                 return task.returncode
2106
2107         def _final_exit(self, task):
2108                 """
2109                 Assumes that task is the final task of this composite task.
2110                 Calls _default_exit() and sets self.returncode to the task's
2111                 returncode and sets self._current_task to None.
2112                 """
2113                 self._default_exit(task)
2114                 self._current_task = None
2115                 self.returncode = task.returncode
2116                 return self.returncode
2117
2118         def _default_final_exit(self, task):
2119                 """
2120                 This calls _final_exit() and then wait().
2121
2122                 Subclasses can use this as a generic final task exit callback.
2123
2124                 """
2125                 self._final_exit(task)
2126                 return self.wait()
2127
2128         def _start_task(self, task, exit_handler):
2129                 """
2130                 Register exit handler for the given task, set it
2131                 as self._current_task, and call task.start().
2132
2133                 Subclasses can use this as a generic way to start
2134                 a task.
2135
2136                 """
2137                 task.addExitListener(exit_handler)
2138                 self._current_task = task
2139                 task.start()
2140
2141 class TaskSequence(CompositeTask):
2142         """
2143         A collection of tasks that executes sequentially. Each task
2144         must have a addExitListener() method that can be used as
2145         a means to trigger movement from one task to the next.
2146         """
2147
2148         __slots__ = ("_task_queue",)
2149
2150         def __init__(self, **kwargs):
2151                 AsynchronousTask.__init__(self, **kwargs)
2152                 self._task_queue = deque()
2153
2154         def add(self, task):
2155                 self._task_queue.append(task)
2156
2157         def _start(self):
2158                 self._start_next_task()
2159
2160         def cancel(self):
2161                 self._task_queue.clear()
2162                 CompositeTask.cancel(self)
2163
2164         def _start_next_task(self):
2165                 self._start_task(self._task_queue.popleft(),
2166                         self._task_exit_handler)
2167
2168         def _task_exit_handler(self, task):
2169                 if self._default_exit(task) != os.EX_OK:
2170                         self.wait()
2171                 elif self._task_queue:
2172                         self._start_next_task()
2173                 else:
2174                         self._final_exit(task)
2175                         self.wait()
2176
2177 class SubProcess(AbstractPollTask):
2178
2179         __slots__ = ("pid",) + \
2180                 ("_files", "_reg_id")
2181
2182         # A file descriptor is required for the scheduler to monitor changes from
2183         # inside a poll() loop. When logging is not enabled, create a pipe just to
2184         # serve this purpose alone.
2185         _dummy_pipe_fd = 9
2186
2187         def _poll(self):
2188                 if self.returncode is not None:
2189                         return self.returncode
2190                 if self.pid is None:
2191                         return self.returncode
2192                 if self._registered:
2193                         return self.returncode
2194
2195                 try:
2196                         retval = os.waitpid(self.pid, os.WNOHANG)
2197                 except OSError, e:
2198                         if e.errno != errno.ECHILD:
2199                                 raise
2200                         del e
2201                         retval = (self.pid, 1)
2202
2203                 if retval == (0, 0):
2204                         return None
2205                 self._set_returncode(retval)
2206                 return self.returncode
2207
2208         def cancel(self):
2209                 if self.isAlive():
2210                         try:
2211                                 os.kill(self.pid, signal.SIGTERM)
2212                         except OSError, e:
2213                                 if e.errno != errno.ESRCH:
2214                                         raise
2215                                 del e
2216
2217                 self.cancelled = True
2218                 if self.pid is not None:
2219                         self.wait()
2220                 return self.returncode
2221
2222         def isAlive(self):
2223                 return self.pid is not None and \
2224                         self.returncode is None
2225
2226         def _wait(self):
2227
2228                 if self.returncode is not None:
2229                         return self.returncode
2230
2231                 if self._registered:
2232                         self.scheduler.schedule(self._reg_id)
2233                         self._unregister()
2234                         if self.returncode is not None:
2235                                 return self.returncode
2236
2237                 try:
2238                         wait_retval = os.waitpid(self.pid, 0)
2239                 except OSError, e:
2240                         if e.errno != errno.ECHILD:
2241                                 raise
2242                         del e
2243                         self._set_returncode((self.pid, 1))
2244                 else:
2245                         self._set_returncode(wait_retval)
2246
2247                 return self.returncode
2248
2249         def _unregister(self):
2250                 """
2251                 Unregister from the scheduler and close open files.
2252                 """
2253
2254                 self._registered = False
2255
2256                 if self._reg_id is not None:
2257                         self.scheduler.unregister(self._reg_id)
2258                         self._reg_id = None
2259
2260                 if self._files is not None:
2261                         for f in self._files.itervalues():
2262                                 f.close()
2263                         self._files = None
2264
2265         def _set_returncode(self, wait_retval):
2266
2267                 retval = wait_retval[1]
2268
2269                 if retval != os.EX_OK:
2270                         if retval & 0xff:
2271                                 retval = (retval & 0xff) << 8
2272                         else:
2273                                 retval = retval >> 8
2274
2275                 self.returncode = retval
2276
2277 class SpawnProcess(SubProcess):
2278
2279         """
2280         Constructor keyword args are passed into portage.process.spawn().
2281         The required "args" keyword argument will be passed as the first
2282         spawn() argument.
2283         """
2284
2285         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2286                 "uid", "gid", "groups", "umask", "logfile",
2287                 "path_lookup", "pre_exec")
2288
2289         __slots__ = ("args",) + \
2290                 _spawn_kwarg_names
2291
2292         _file_names = ("log", "process", "stdout")
2293         _files_dict = slot_dict_class(_file_names, prefix="")
2294
2295         def _start(self):
2296
2297                 if self.cancelled:
2298                         return
2299
2300                 if self.fd_pipes is None:
2301                         self.fd_pipes = {}
2302                 fd_pipes = self.fd_pipes
2303                 fd_pipes.setdefault(0, sys.stdin.fileno())
2304                 fd_pipes.setdefault(1, sys.stdout.fileno())
2305                 fd_pipes.setdefault(2, sys.stderr.fileno())
2306
2307                 # flush any pending output
2308                 for fd in fd_pipes.itervalues():
2309                         if fd == sys.stdout.fileno():
2310                                 sys.stdout.flush()
2311                         if fd == sys.stderr.fileno():
2312                                 sys.stderr.flush()
2313
2314                 logfile = self.logfile
2315                 self._files = self._files_dict()
2316                 files = self._files
2317
2318                 master_fd, slave_fd = self._pipe(fd_pipes)
2319                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2320                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2321
2322                 null_input = None
2323                 fd_pipes_orig = fd_pipes.copy()
2324                 if self.background:
2325                         # TODO: Use job control functions like tcsetpgrp() to control
2326                         # access to stdin. Until then, use /dev/null so that any
2327                         # attempts to read from stdin will immediately return EOF
2328                         # instead of blocking indefinitely.
2329                         null_input = open('/dev/null', 'rb')
2330                         fd_pipes[0] = null_input.fileno()
2331                 else:
2332                         fd_pipes[0] = fd_pipes_orig[0]
2333
2334                 files.process = os.fdopen(master_fd, 'rb')
2335                 if logfile is not None:
2336
2337                         fd_pipes[1] = slave_fd
2338                         fd_pipes[2] = slave_fd
2339
2340                         files.log = open(logfile, mode='ab')
2341                         portage.util.apply_secpass_permissions(logfile,
2342                                 uid=portage.portage_uid, gid=portage.portage_gid,
2343                                 mode=0660)
2344
2345                         if not self.background:
2346                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2347
2348                         output_handler = self._output_handler
2349
2350                 else:
2351
2352                         # Create a dummy pipe so the scheduler can monitor
2353                         # the process from inside a poll() loop.
2354                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2355                         if self.background:
2356                                 fd_pipes[1] = slave_fd
2357                                 fd_pipes[2] = slave_fd
2358                         output_handler = self._dummy_handler
2359
2360                 kwargs = {}
2361                 for k in self._spawn_kwarg_names:
2362                         v = getattr(self, k)
2363                         if v is not None:
2364                                 kwargs[k] = v
2365
2366                 kwargs["fd_pipes"] = fd_pipes
2367                 kwargs["returnpid"] = True
2368                 kwargs.pop("logfile", None)
2369
2370                 self._reg_id = self.scheduler.register(files.process.fileno(),
2371                         self._registered_events, output_handler)
2372                 self._registered = True
2373
2374                 retval = self._spawn(self.args, **kwargs)
2375
2376                 os.close(slave_fd)
2377                 if null_input is not None:
2378                         null_input.close()
2379
2380                 if isinstance(retval, int):
2381                         # spawn failed
2382                         self._unregister()
2383                         self.returncode = retval
2384                         self.wait()
2385                         return
2386
2387                 self.pid = retval[0]
2388                 portage.process.spawned_pids.remove(self.pid)
2389
2390         def _pipe(self, fd_pipes):
2391                 """
2392                 @type fd_pipes: dict
2393                 @param fd_pipes: pipes from which to copy terminal size if desired.
2394                 """
2395                 return os.pipe()
2396
2397         def _spawn(self, args, **kwargs):
2398                 return portage.process.spawn(args, **kwargs)
2399
2400         def _output_handler(self, fd, event):
2401
2402                 if event & PollConstants.POLLIN:
2403
2404                         files = self._files
2405                         buf = array.array('B')
2406                         try:
2407                                 buf.fromfile(files.process, self._bufsize)
2408                         except EOFError:
2409                                 pass
2410
2411                         if buf:
2412                                 if not self.background:
2413                                         buf.tofile(files.stdout)
2414                                         files.stdout.flush()
2415                                 buf.tofile(files.log)
2416                                 files.log.flush()
2417                         else:
2418                                 self._unregister()
2419                                 self.wait()
2420
2421                 self._unregister_if_appropriate(event)
2422                 return self._registered
2423
2424         def _dummy_handler(self, fd, event):
2425                 """
2426                 This method is mainly interested in detecting EOF, since
2427                 the only purpose of the pipe is to allow the scheduler to
2428                 monitor the process from inside a poll() loop.
2429                 """
2430
2431                 if event & PollConstants.POLLIN:
2432
2433                         buf = array.array('B')
2434                         try:
2435                                 buf.fromfile(self._files.process, self._bufsize)
2436                         except EOFError:
2437                                 pass
2438
2439                         if buf:
2440                                 pass
2441                         else:
2442                                 self._unregister()
2443                                 self.wait()
2444
2445                 self._unregister_if_appropriate(event)
2446                 return self._registered
2447
2448 class MiscFunctionsProcess(SpawnProcess):
2449         """
2450         Spawns misc-functions.sh with an existing ebuild environment.
2451         """
2452
2453         __slots__ = ("commands", "phase", "pkg", "settings")
2454
2455         def _start(self):
2456                 settings = self.settings
2457                 settings.pop("EBUILD_PHASE", None)
2458                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2459                 misc_sh_binary = os.path.join(portage_bin_path,
2460                         os.path.basename(portage.const.MISC_SH_BINARY))
2461
2462                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2463                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2464
2465                 portage._doebuild_exit_status_unlink(
2466                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2467
2468                 SpawnProcess._start(self)
2469
2470         def _spawn(self, args, **kwargs):
2471                 settings = self.settings
2472                 debug = settings.get("PORTAGE_DEBUG") == "1"
2473                 return portage.spawn(" ".join(args), settings,
2474                         debug=debug, **kwargs)
2475
2476         def _set_returncode(self, wait_retval):
2477                 SpawnProcess._set_returncode(self, wait_retval)
2478                 self.returncode = portage._doebuild_exit_status_check_and_log(
2479                         self.settings, self.phase, self.returncode)
2480
2481 class EbuildFetcher(SpawnProcess):
2482
2483         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2484                 ("_build_dir",)
2485
2486         def _start(self):
2487
2488                 root_config = self.pkg.root_config
2489                 portdb = root_config.trees["porttree"].dbapi
2490                 ebuild_path = portdb.findname(self.pkg.cpv)
2491                 settings = self.config_pool.allocate()
2492                 settings.setcpv(self.pkg)
2493
2494                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2495                 # should not be touched since otherwise it could interfere with
2496                 # another instance of the same cpv concurrently being built for a
2497                 # different $ROOT (currently, builds only cooperate with prefetchers
2498                 # that are spawned for the same $ROOT).
2499                 if not self.prefetch:
2500                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2501                         self._build_dir.lock()
2502                         self._build_dir.clean()
2503                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2504                         if self.logfile is None:
2505                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2506
2507                 phase = "fetch"
2508                 if self.fetchall:
2509                         phase = "fetchall"
2510
2511                 # If any incremental variables have been overridden
2512                 # via the environment, those values need to be passed
2513                 # along here so that they are correctly considered by
2514                 # the config instance in the subproccess.
2515                 fetch_env = os.environ.copy()
2516
2517                 nocolor = settings.get("NOCOLOR")
2518                 if nocolor is not None:
2519                         fetch_env["NOCOLOR"] = nocolor
2520
2521                 fetch_env["PORTAGE_NICENESS"] = "0"
2522                 if self.prefetch:
2523                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2524
2525                 ebuild_binary = os.path.join(
2526                         settings["PORTAGE_BIN_PATH"], "ebuild")
2527
2528                 fetch_args = [ebuild_binary, ebuild_path, phase]
2529                 debug = settings.get("PORTAGE_DEBUG") == "1"
2530                 if debug:
2531                         fetch_args.append("--debug")
2532
2533                 self.args = fetch_args
2534                 self.env = fetch_env
2535                 SpawnProcess._start(self)
2536
2537         def _pipe(self, fd_pipes):
2538                 """When appropriate, use a pty so that fetcher progress bars,
2539                 like wget has, will work properly."""
2540                 if self.background or not sys.stdout.isatty():
2541                         # When the output only goes to a log file,
2542                         # there's no point in creating a pty.
2543                         return os.pipe()
2544                 stdout_pipe = fd_pipes.get(1)
2545                 got_pty, master_fd, slave_fd = \
2546                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2547                 return (master_fd, slave_fd)
2548
2549         def _set_returncode(self, wait_retval):
2550                 SpawnProcess._set_returncode(self, wait_retval)
2551                 # Collect elog messages that might have been
2552                 # created by the pkg_nofetch phase.
2553                 if self._build_dir is not None:
2554                         # Skip elog messages for prefetch, in order to avoid duplicates.
2555                         if not self.prefetch and self.returncode != os.EX_OK:
2556                                 elog_out = None
2557                                 if self.logfile is not None:
2558                                         if self.background:
2559                                                 elog_out = open(self.logfile, 'a')
2560                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2561                                 if self.logfile is not None:
2562                                         msg += ", Log file:"
2563                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2564                                 if self.logfile is not None:
2565                                         eerror(" '%s'" % (self.logfile,),
2566                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2567                                 if elog_out is not None:
2568                                         elog_out.close()
2569                         if not self.prefetch:
2570                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2571                         features = self._build_dir.settings.features
2572                         if self.returncode == os.EX_OK:
2573                                 self._build_dir.clean()
2574                         self._build_dir.unlock()
2575                         self.config_pool.deallocate(self._build_dir.settings)
2576                         self._build_dir = None
2577
2578 class EbuildBuildDir(SlotObject):
2579
2580         __slots__ = ("dir_path", "pkg", "settings",
2581                 "locked", "_catdir", "_lock_obj")
2582
2583         def __init__(self, **kwargs):
2584                 SlotObject.__init__(self, **kwargs)
2585                 self.locked = False
2586
2587         def lock(self):
2588                 """
2589                 This raises an AlreadyLocked exception if lock() is called
2590                 while a lock is already held. In order to avoid this, call
2591                 unlock() or check whether the "locked" attribute is True
2592                 or False before calling lock().
2593                 """
2594                 if self._lock_obj is not None:
2595                         raise self.AlreadyLocked((self._lock_obj,))
2596
2597                 dir_path = self.dir_path
2598                 if dir_path is None:
2599                         root_config = self.pkg.root_config
2600                         portdb = root_config.trees["porttree"].dbapi
2601                         ebuild_path = portdb.findname(self.pkg.cpv)
2602                         settings = self.settings
2603                         settings.setcpv(self.pkg)
2604                         debug = settings.get("PORTAGE_DEBUG") == "1"
2605                         use_cache = 1 # always true
2606                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2607                                 self.settings, debug, use_cache, portdb)
2608                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2609
2610                 catdir = os.path.dirname(dir_path)
2611                 self._catdir = catdir
2612
2613                 portage.util.ensure_dirs(os.path.dirname(catdir),
2614                         gid=portage.portage_gid,
2615                         mode=070, mask=0)
2616                 catdir_lock = None
2617                 try:
2618                         catdir_lock = portage.locks.lockdir(catdir)
2619                         portage.util.ensure_dirs(catdir,
2620                                 gid=portage.portage_gid,
2621                                 mode=070, mask=0)
2622                         self._lock_obj = portage.locks.lockdir(dir_path)
2623                 finally:
2624                         self.locked = self._lock_obj is not None
2625                         if catdir_lock is not None:
2626                                 portage.locks.unlockdir(catdir_lock)
2627
2628         def clean(self):
2629                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2630                 by keepwork or keeptemp in FEATURES."""
2631                 settings = self.settings
2632                 features = settings.features
2633                 if not ("keepwork" in features or "keeptemp" in features):
2634                         try:
2635                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2636                         except EnvironmentError, e:
2637                                 if e.errno != errno.ENOENT:
2638                                         raise
2639                                 del e
2640
2641         def unlock(self):
2642                 if self._lock_obj is None:
2643                         return
2644
2645                 portage.locks.unlockdir(self._lock_obj)
2646                 self._lock_obj = None
2647                 self.locked = False
2648
2649                 catdir = self._catdir
2650                 catdir_lock = None
2651                 try:
2652                         catdir_lock = portage.locks.lockdir(catdir)
2653                 finally:
2654                         if catdir_lock:
2655                                 try:
2656                                         os.rmdir(catdir)
2657                                 except OSError, e:
2658                                         if e.errno not in (errno.ENOENT,
2659                                                 errno.ENOTEMPTY, errno.EEXIST):
2660                                                 raise
2661                                         del e
2662                                 portage.locks.unlockdir(catdir_lock)
2663
2664         class AlreadyLocked(portage.exception.PortageException):
2665                 pass
2666
2667 class EbuildBuild(CompositeTask):
2668
2669         __slots__ = ("args_set", "config_pool", "find_blockers",
2670                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2671                 "prefetcher", "settings", "world_atom") + \
2672                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2673
2674         def _start(self):
2675
2676                 logger = self.logger
2677                 opts = self.opts
2678                 pkg = self.pkg
2679                 settings = self.settings
2680                 world_atom = self.world_atom
2681                 root_config = pkg.root_config
2682                 tree = "porttree"
2683                 self._tree = tree
2684                 portdb = root_config.trees[tree].dbapi
2685                 settings.setcpv(pkg)
2686                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2687                 ebuild_path = portdb.findname(self.pkg.cpv)
2688                 self._ebuild_path = ebuild_path
2689
2690                 prefetcher = self.prefetcher
2691                 if prefetcher is None:
2692                         pass
2693                 elif not prefetcher.isAlive():
2694                         prefetcher.cancel()
2695                 elif prefetcher.poll() is None:
2696
2697                         waiting_msg = "Fetching files " + \
2698                                 "in the background. " + \
2699                                 "To view fetch progress, run `tail -f " + \
2700                                 "/var/log/emerge-fetch.log` in another " + \
2701                                 "terminal."
2702                         msg_prefix = colorize("GOOD", " * ")
2703                         from textwrap import wrap
2704                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2705                                 for line in wrap(waiting_msg, 65))
2706                         if not self.background:
2707                                 writemsg(waiting_msg, noiselevel=-1)
2708
2709                         self._current_task = prefetcher
2710                         prefetcher.addExitListener(self._prefetch_exit)
2711                         return
2712
2713                 self._prefetch_exit(prefetcher)
2714
2715         def _prefetch_exit(self, prefetcher):
2716
2717                 opts = self.opts
2718                 pkg = self.pkg
2719                 settings = self.settings
2720
2721                 if opts.fetchonly:
2722                                 fetcher = EbuildFetchonly(
2723                                         fetch_all=opts.fetch_all_uri,
2724                                         pkg=pkg, pretend=opts.pretend,
2725                                         settings=settings)
2726                                 retval = fetcher.execute()
2727                                 self.returncode = retval
2728                                 self.wait()
2729                                 return
2730
2731                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2732                         fetchall=opts.fetch_all_uri,
2733                         fetchonly=opts.fetchonly,
2734                         background=self.background,
2735                         pkg=pkg, scheduler=self.scheduler)
2736
2737                 self._start_task(fetcher, self._fetch_exit)
2738
2739         def _fetch_exit(self, fetcher):
2740                 opts = self.opts
2741                 pkg = self.pkg
2742
2743                 fetch_failed = False
2744                 if opts.fetchonly:
2745                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2746                 else:
2747                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2748
2749                 if fetch_failed and fetcher.logfile is not None and \
2750                         os.path.exists(fetcher.logfile):
2751                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2752
2753                 if not fetch_failed and fetcher.logfile is not None:
2754                         # Fetch was successful, so remove the fetch log.
2755                         try:
2756                                 os.unlink(fetcher.logfile)
2757                         except OSError:
2758                                 pass
2759
2760                 if fetch_failed or opts.fetchonly:
2761                         self.wait()
2762                         return
2763
2764                 logger = self.logger
2765                 opts = self.opts
2766                 pkg_count = self.pkg_count
2767                 scheduler = self.scheduler
2768                 settings = self.settings
2769                 features = settings.features
2770                 ebuild_path = self._ebuild_path
2771                 system_set = pkg.root_config.sets["system"]
2772
2773                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2774                 self._build_dir.lock()
2775
2776                 # Cleaning is triggered before the setup
2777                 # phase, in portage.doebuild().
2778                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2779                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2780                 short_msg = "emerge: (%s of %s) %s Clean" % \
2781                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2782                 logger.log(msg, short_msg=short_msg)
2783
2784                 #buildsyspkg: Check if we need to _force_ binary package creation
2785                 self._issyspkg = "buildsyspkg" in features and \
2786                                 system_set.findAtomForPackage(pkg) and \
2787                                 not opts.buildpkg
2788
2789                 if opts.buildpkg or self._issyspkg:
2790
2791                         self._buildpkg = True
2792
2793                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2794                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2795                         short_msg = "emerge: (%s of %s) %s Compile" % \
2796                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2797                         logger.log(msg, short_msg=short_msg)
2798
2799                 else:
2800                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2801                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2802                         short_msg = "emerge: (%s of %s) %s Compile" % \
2803                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2804                         logger.log(msg, short_msg=short_msg)
2805
2806                 build = EbuildExecuter(background=self.background, pkg=pkg,
2807                         scheduler=scheduler, settings=settings)
2808                 self._start_task(build, self._build_exit)
2809
2810         def _unlock_builddir(self):
2811                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2812                 self._build_dir.unlock()
2813
2814         def _build_exit(self, build):
2815                 if self._default_exit(build) != os.EX_OK:
2816                         self._unlock_builddir()
2817                         self.wait()
2818                         return
2819
2820                 opts = self.opts
2821                 buildpkg = self._buildpkg
2822
2823                 if not buildpkg:
2824                         self._final_exit(build)
2825                         self.wait()
2826                         return
2827
2828                 if self._issyspkg:
2829                         msg = ">>> This is a system package, " + \
2830                                 "let's pack a rescue tarball.\n"
2831
2832                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2833                         if log_path is not None:
2834                                 log_file = open(log_path, 'a')
2835                                 try:
2836                                         log_file.write(msg)
2837                                 finally:
2838                                         log_file.close()
2839
2840                         if not self.background:
2841                                 portage.writemsg_stdout(msg, noiselevel=-1)
2842
2843                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2844                         scheduler=self.scheduler, settings=self.settings)
2845
2846                 self._start_task(packager, self._buildpkg_exit)
2847
2848         def _buildpkg_exit(self, packager):
2849                 """
2850                 Released build dir lock when there is a failure or
2851                 when in buildpkgonly mode. Otherwise, the lock will
2852                 be released when merge() is called.
2853                 """
2854
2855                 if self._default_exit(packager) != os.EX_OK:
2856                         self._unlock_builddir()
2857                         self.wait()
2858                         return
2859
2860                 if self.opts.buildpkgonly:
2861                         # Need to call "clean" phase for buildpkgonly mode
2862                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2863                         phase = "clean"
2864                         clean_phase = EbuildPhase(background=self.background,
2865                                 pkg=self.pkg, phase=phase,
2866                                 scheduler=self.scheduler, settings=self.settings,
2867                                 tree=self._tree)
2868                         self._start_task(clean_phase, self._clean_exit)
2869                         return
2870
2871                 # Continue holding the builddir lock until
2872                 # after the package has been installed.
2873                 self._current_task = None
2874                 self.returncode = packager.returncode
2875                 self.wait()
2876
2877         def _clean_exit(self, clean_phase):
2878                 if self._final_exit(clean_phase) != os.EX_OK or \
2879                         self.opts.buildpkgonly:
2880                         self._unlock_builddir()
2881                 self.wait()
2882
2883         def install(self):
2884                 """
2885                 Install the package and then clean up and release locks.
2886                 Only call this after the build has completed successfully
2887                 and neither fetchonly nor buildpkgonly mode are enabled.
2888                 """
2889
2890                 find_blockers = self.find_blockers
2891                 ldpath_mtimes = self.ldpath_mtimes
2892                 logger = self.logger
2893                 pkg = self.pkg
2894                 pkg_count = self.pkg_count
2895                 settings = self.settings
2896                 world_atom = self.world_atom
2897                 ebuild_path = self._ebuild_path
2898                 tree = self._tree
2899
2900                 merge = EbuildMerge(find_blockers=self.find_blockers,
2901                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2902                         pkg_count=pkg_count, pkg_path=ebuild_path,
2903                         scheduler=self.scheduler,
2904                         settings=settings, tree=tree, world_atom=world_atom)
2905
2906                 msg = " === (%s of %s) Merging (%s::%s)" % \
2907                         (pkg_count.curval, pkg_count.maxval,
2908                         pkg.cpv, ebuild_path)
2909                 short_msg = "emerge: (%s of %s) %s Merge" % \
2910                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2911                 logger.log(msg, short_msg=short_msg)
2912
2913                 try:
2914                         rval = merge.execute()
2915                 finally:
2916                         self._unlock_builddir()
2917
2918                 return rval
2919
2920 class EbuildExecuter(CompositeTask):
2921
2922         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2923
2924         _phases = ("prepare", "configure", "compile", "test", "install")
2925
2926         _live_eclasses = frozenset([
2927                 "bzr",
2928                 "cvs",
2929                 "darcs",
2930                 "git",
2931                 "mercurial",
2932                 "subversion"
2933         ])
2934
2935         def _start(self):
2936                 self._tree = "porttree"
2937                 pkg = self.pkg
2938                 phase = "clean"
2939                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2940                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2941                 self._start_task(clean_phase, self._clean_phase_exit)
2942
2943         def _clean_phase_exit(self, clean_phase):
2944
2945                 if self._default_exit(clean_phase) != os.EX_OK:
2946                         self.wait()
2947                         return
2948
2949                 pkg = self.pkg
2950                 scheduler = self.scheduler
2951                 settings = self.settings
2952                 cleanup = 1
2953
2954                 # This initializes PORTAGE_LOG_FILE.
2955                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2956
2957                 setup_phase = EbuildPhase(background=self.background,
2958                         pkg=pkg, phase="setup", scheduler=scheduler,
2959                         settings=settings, tree=self._tree)
2960
2961                 setup_phase.addExitListener(self._setup_exit)
2962                 self._current_task = setup_phase
2963                 self.scheduler.scheduleSetup(setup_phase)
2964
2965         def _setup_exit(self, setup_phase):
2966
2967                 if self._default_exit(setup_phase) != os.EX_OK:
2968                         self.wait()
2969                         return
2970
2971                 unpack_phase = EbuildPhase(background=self.background,
2972                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2973                         settings=self.settings, tree=self._tree)
2974
2975                 if self._live_eclasses.intersection(self.pkg.inherited):
2976                         # Serialize $DISTDIR access for live ebuilds since
2977                         # otherwise they can interfere with eachother.
2978
2979                         unpack_phase.addExitListener(self._unpack_exit)
2980                         self._current_task = unpack_phase
2981                         self.scheduler.scheduleUnpack(unpack_phase)
2982
2983                 else:
2984                         self._start_task(unpack_phase, self._unpack_exit)
2985
2986         def _unpack_exit(self, unpack_phase):
2987
2988                 if self._default_exit(unpack_phase) != os.EX_OK:
2989                         self.wait()
2990                         return
2991
2992                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2993
2994                 pkg = self.pkg
2995                 phases = self._phases
2996                 eapi = pkg.metadata["EAPI"]
2997                 if eapi in ("0", "1"):
2998                         # skip src_prepare and src_configure
2999                         phases = phases[2:]
3000
3001                 for phase in phases:
3002                         ebuild_phases.add(EbuildPhase(background=self.background,
3003                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3004                                 settings=self.settings, tree=self._tree))
3005
3006                 self._start_task(ebuild_phases, self._default_final_exit)
3007
3008 class EbuildMetadataPhase(SubProcess):
3009
3010         """
3011         Asynchronous interface for the ebuild "depend" phase which is
3012         used to extract metadata from the ebuild.
3013         """
3014
3015         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3016                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3017                 ("_raw_metadata",)
3018
3019         _file_names = ("ebuild",)
3020         _files_dict = slot_dict_class(_file_names, prefix="")
3021         _metadata_fd = 9
3022
3023         def _start(self):
3024                 settings = self.settings
3025                 settings.reset()
3026                 ebuild_path = self.ebuild_path
3027                 debug = settings.get("PORTAGE_DEBUG") == "1"
3028                 master_fd = None
3029                 slave_fd = None
3030                 fd_pipes = None
3031                 if self.fd_pipes is not None:
3032                         fd_pipes = self.fd_pipes.copy()
3033                 else:
3034                         fd_pipes = {}
3035
3036                 fd_pipes.setdefault(0, sys.stdin.fileno())
3037                 fd_pipes.setdefault(1, sys.stdout.fileno())
3038                 fd_pipes.setdefault(2, sys.stderr.fileno())
3039
3040                 # flush any pending output
3041                 for fd in fd_pipes.itervalues():
3042                         if fd == sys.stdout.fileno():
3043                                 sys.stdout.flush()
3044                         if fd == sys.stderr.fileno():
3045                                 sys.stderr.flush()
3046
3047                 fd_pipes_orig = fd_pipes.copy()
3048                 self._files = self._files_dict()
3049                 files = self._files
3050
3051                 master_fd, slave_fd = os.pipe()
3052                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3053                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3054
3055                 fd_pipes[self._metadata_fd] = slave_fd
3056
3057                 self._raw_metadata = []
3058                 files.ebuild = os.fdopen(master_fd, 'r')
3059                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3060                         self._registered_events, self._output_handler)
3061                 self._registered = True
3062
3063                 retval = portage.doebuild(ebuild_path, "depend",
3064                         settings["ROOT"], settings, debug,
3065                         mydbapi=self.portdb, tree="porttree",
3066                         fd_pipes=fd_pipes, returnpid=True)
3067
3068                 os.close(slave_fd)
3069
3070                 if isinstance(retval, int):
3071                         # doebuild failed before spawning
3072                         self._unregister()
3073                         self.returncode = retval
3074                         self.wait()
3075                         return
3076
3077                 self.pid = retval[0]
3078                 portage.process.spawned_pids.remove(self.pid)
3079
3080         def _output_handler(self, fd, event):
3081
3082                 if event & PollConstants.POLLIN:
3083                         self._raw_metadata.append(self._files.ebuild.read())
3084                         if not self._raw_metadata[-1]:
3085                                 self._unregister()
3086                                 self.wait()
3087
3088                 self._unregister_if_appropriate(event)
3089                 return self._registered
3090
3091         def _set_returncode(self, wait_retval):
3092                 SubProcess._set_returncode(self, wait_retval)
3093                 if self.returncode == os.EX_OK:
3094                         metadata_lines = "".join(self._raw_metadata).splitlines()
3095                         if len(portage.auxdbkeys) != len(metadata_lines):
3096                                 # Don't trust bash's returncode if the
3097                                 # number of lines is incorrect.
3098                                 self.returncode = 1
3099                         else:
3100                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3101                                 self.metadata_callback(self.cpv, self.ebuild_path,
3102                                         self.repo_path, metadata, self.ebuild_mtime)
3103
3104 class EbuildProcess(SpawnProcess):
3105
3106         __slots__ = ("phase", "pkg", "settings", "tree")
3107
3108         def _start(self):
3109                 # Don't open the log file during the clean phase since the
3110                 # open file can result in an nfs lock on $T/build.log which
3111                 # prevents the clean phase from removing $T.
3112                 if self.phase not in ("clean", "cleanrm"):
3113                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3114                 SpawnProcess._start(self)
3115
3116         def _pipe(self, fd_pipes):
3117                 stdout_pipe = fd_pipes.get(1)
3118                 got_pty, master_fd, slave_fd = \
3119                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3120                 return (master_fd, slave_fd)
3121
3122         def _spawn(self, args, **kwargs):
3123
3124                 root_config = self.pkg.root_config
3125                 tree = self.tree
3126                 mydbapi = root_config.trees[tree].dbapi
3127                 settings = self.settings
3128                 ebuild_path = settings["EBUILD"]
3129                 debug = settings.get("PORTAGE_DEBUG") == "1"
3130
3131                 rval = portage.doebuild(ebuild_path, self.phase,
3132                         root_config.root, settings, debug,
3133                         mydbapi=mydbapi, tree=tree, **kwargs)
3134
3135                 return rval
3136
3137         def _set_returncode(self, wait_retval):
3138                 SpawnProcess._set_returncode(self, wait_retval)
3139
3140                 if self.phase not in ("clean", "cleanrm"):
3141                         self.returncode = portage._doebuild_exit_status_check_and_log(
3142                                 self.settings, self.phase, self.returncode)
3143
3144                 if self.phase == "test" and self.returncode != os.EX_OK and \
3145                         "test-fail-continue" in self.settings.features:
3146                         self.returncode = os.EX_OK
3147
3148                 portage._post_phase_userpriv_perms(self.settings)
3149
3150 class EbuildPhase(CompositeTask):
3151
3152         __slots__ = ("background", "pkg", "phase",
3153                 "scheduler", "settings", "tree")
3154
3155         _post_phase_cmds = portage._post_phase_cmds
3156
3157         def _start(self):
3158
3159                 ebuild_process = EbuildProcess(background=self.background,
3160                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3161                         settings=self.settings, tree=self.tree)
3162
3163                 self._start_task(ebuild_process, self._ebuild_exit)
3164
3165         def _ebuild_exit(self, ebuild_process):
3166
3167                 if self.phase == "install":
3168                         out = None
3169                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3170                         log_file = None
3171                         if self.background and log_path is not None:
3172                                 log_file = open(log_path, 'a')
3173                                 out = log_file
3174                         try:
3175                                 portage._check_build_log(self.settings, out=out)
3176                         finally:
3177                                 if log_file is not None:
3178                                         log_file.close()
3179
3180                 if self._default_exit(ebuild_process) != os.EX_OK:
3181                         self.wait()
3182                         return
3183
3184                 settings = self.settings
3185
3186                 if self.phase == "install":
3187                         portage._post_src_install_uid_fix(settings)
3188
3189                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3190                 if post_phase_cmds is not None:
3191                         post_phase = MiscFunctionsProcess(background=self.background,
3192                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3193                                 scheduler=self.scheduler, settings=settings)
3194                         self._start_task(post_phase, self._post_phase_exit)
3195                         return
3196
3197                 self.returncode = ebuild_process.returncode
3198                 self._current_task = None
3199                 self.wait()
3200
3201         def _post_phase_exit(self, post_phase):
3202                 if self._final_exit(post_phase) != os.EX_OK:
3203                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3204                                 noiselevel=-1)
3205                 self._current_task = None
3206                 self.wait()
3207                 return
3208
3209 class EbuildBinpkg(EbuildProcess):
3210         """
3211         This assumes that src_install() has successfully completed.
3212         """
3213         __slots__ = ("_binpkg_tmpfile",)
3214
3215         def _start(self):
3216                 self.phase = "package"
3217                 self.tree = "porttree"
3218                 pkg = self.pkg
3219                 root_config = pkg.root_config
3220                 portdb = root_config.trees["porttree"].dbapi
3221                 bintree = root_config.trees["bintree"]
3222                 ebuild_path = portdb.findname(self.pkg.cpv)
3223                 settings = self.settings
3224                 debug = settings.get("PORTAGE_DEBUG") == "1"
3225
3226                 bintree.prevent_collision(pkg.cpv)
3227                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3228                         pkg.cpv + ".tbz2." + str(os.getpid()))
3229                 self._binpkg_tmpfile = binpkg_tmpfile
3230                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3231                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3232
3233                 try:
3234                         EbuildProcess._start(self)
3235                 finally:
3236                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3237
3238         def _set_returncode(self, wait_retval):
3239                 EbuildProcess._set_returncode(self, wait_retval)
3240
3241                 pkg = self.pkg
3242                 bintree = pkg.root_config.trees["bintree"]
3243                 binpkg_tmpfile = self._binpkg_tmpfile
3244                 if self.returncode == os.EX_OK:
3245                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3246
3247 class EbuildMerge(SlotObject):
3248
3249         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3250                 "pkg", "pkg_count", "pkg_path", "pretend",
3251                 "scheduler", "settings", "tree", "world_atom")
3252
3253         def execute(self):
3254                 root_config = self.pkg.root_config
3255                 settings = self.settings
3256                 retval = portage.merge(settings["CATEGORY"],
3257                         settings["PF"], settings["D"],
3258                         os.path.join(settings["PORTAGE_BUILDDIR"],
3259                         "build-info"), root_config.root, settings,
3260                         myebuild=settings["EBUILD"],
3261                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3262                         vartree=root_config.trees["vartree"],
3263                         prev_mtimes=self.ldpath_mtimes,
3264                         scheduler=self.scheduler,
3265                         blockers=self.find_blockers)
3266
3267                 if retval == os.EX_OK:
3268                         self.world_atom(self.pkg)
3269                         self._log_success()
3270
3271                 return retval
3272
3273         def _log_success(self):
3274                 pkg = self.pkg
3275                 pkg_count = self.pkg_count
3276                 pkg_path = self.pkg_path
3277                 logger = self.logger
3278                 if "noclean" not in self.settings.features:
3279                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3280                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3281                         logger.log((" === (%s of %s) " + \
3282                                 "Post-Build Cleaning (%s::%s)") % \
3283                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3284                                 short_msg=short_msg)
3285                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3286                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3287
3288 class PackageUninstall(AsynchronousTask):
3289
3290         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3291
3292         def _start(self):
3293                 try:
3294                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3295                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3296                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3297                                 writemsg_level=self._writemsg_level)
3298                 except UninstallFailure, e:
3299                         self.returncode = e.status
3300                 else:
3301                         self.returncode = os.EX_OK
3302                 self.wait()
3303
3304         def _writemsg_level(self, msg, level=0, noiselevel=0):
3305
3306                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3307                 background = self.background
3308
3309                 if log_path is None:
3310                         if not (background and level < logging.WARNING):
3311                                 portage.util.writemsg_level(msg,
3312                                         level=level, noiselevel=noiselevel)
3313                 else:
3314                         if not background:
3315                                 portage.util.writemsg_level(msg,
3316                                         level=level, noiselevel=noiselevel)
3317
3318                         f = open(log_path, 'a')
3319                         try:
3320                                 f.write(msg)
3321                         finally:
3322                                 f.close()
3323
3324 class Binpkg(CompositeTask):
3325
3326         __slots__ = ("find_blockers",
3327                 "ldpath_mtimes", "logger", "opts",
3328                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3329                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3330                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3331
3332         def _writemsg_level(self, msg, level=0, noiselevel=0):
3333
3334                 if not self.background:
3335                         portage.util.writemsg_level(msg,
3336                                 level=level, noiselevel=noiselevel)
3337
3338                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339                 if  log_path is not None:
3340                         f = open(log_path, 'a')
3341                         try:
3342                                 f.write(msg)
3343                         finally:
3344                                 f.close()
3345
3346         def _start(self):
3347
3348                 pkg = self.pkg
3349                 settings = self.settings
3350                 settings.setcpv(pkg)
3351                 self._tree = "bintree"
3352                 self._bintree = self.pkg.root_config.trees[self._tree]
3353                 self._verify = not self.opts.pretend
3354
3355                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3356                         "portage", pkg.category, pkg.pf)
3357                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3358                         pkg=pkg, settings=settings)
3359                 self._image_dir = os.path.join(dir_path, "image")
3360                 self._infloc = os.path.join(dir_path, "build-info")
3361                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3362                 settings["EBUILD"] = self._ebuild_path
3363                 debug = settings.get("PORTAGE_DEBUG") == "1"
3364                 portage.doebuild_environment(self._ebuild_path, "setup",
3365                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3366                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3367
3368                 # The prefetcher has already completed or it
3369                 # could be running now. If it's running now,
3370                 # wait for it to complete since it holds
3371                 # a lock on the file being fetched. The
3372                 # portage.locks functions are only designed
3373                 # to work between separate processes. Since
3374                 # the lock is held by the current process,
3375                 # use the scheduler and fetcher methods to
3376                 # synchronize with the fetcher.
3377                 prefetcher = self.prefetcher
3378                 if prefetcher is None:
3379                         pass
3380                 elif not prefetcher.isAlive():
3381                         prefetcher.cancel()
3382                 elif prefetcher.poll() is None:
3383
3384                         waiting_msg = ("Fetching '%s' " + \
3385                                 "in the background. " + \
3386                                 "To view fetch progress, run `tail -f " + \
3387                                 "/var/log/emerge-fetch.log` in another " + \
3388                                 "terminal.") % prefetcher.pkg_path
3389                         msg_prefix = colorize("GOOD", " * ")
3390                         from textwrap import wrap
3391                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3392                                 for line in wrap(waiting_msg, 65))
3393                         if not self.background:
3394                                 writemsg(waiting_msg, noiselevel=-1)
3395
3396                         self._current_task = prefetcher
3397                         prefetcher.addExitListener(self._prefetch_exit)
3398                         return
3399
3400                 self._prefetch_exit(prefetcher)
3401
3402         def _prefetch_exit(self, prefetcher):
3403
3404                 pkg = self.pkg
3405                 pkg_count = self.pkg_count
3406                 if not (self.opts.pretend or self.opts.fetchonly):
3407                         self._build_dir.lock()
3408                         try:
3409                                 shutil.rmtree(self._build_dir.dir_path)
3410                         except EnvironmentError, e:
3411                                 if e.errno != errno.ENOENT:
3412                                         raise
3413                                 del e
3414                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3415                 fetcher = BinpkgFetcher(background=self.background,
3416                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3417                         pretend=self.opts.pretend, scheduler=self.scheduler)
3418                 pkg_path = fetcher.pkg_path
3419                 self._pkg_path = pkg_path
3420
3421                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3422
3423                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3424                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3425                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3426                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3427                         self.logger.log(msg, short_msg=short_msg)
3428                         self._start_task(fetcher, self._fetcher_exit)
3429                         return
3430
3431                 self._fetcher_exit(fetcher)
3432
3433         def _fetcher_exit(self, fetcher):
3434
3435                 # The fetcher only has a returncode when
3436                 # --getbinpkg is enabled.
3437                 if fetcher.returncode is not None:
3438                         self._fetched_pkg = True
3439                         if self._default_exit(fetcher) != os.EX_OK:
3440                                 self._unlock_builddir()
3441                                 self.wait()
3442                                 return
3443
3444                 if self.opts.pretend:
3445                         self._current_task = None
3446                         self.returncode = os.EX_OK
3447                         self.wait()
3448                         return
3449
3450                 verifier = None
3451                 if self._verify:
3452                         logfile = None
3453                         if self.background:
3454                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3455                         verifier = BinpkgVerifier(background=self.background,
3456                                 logfile=logfile, pkg=self.pkg)
3457                         self._start_task(verifier, self._verifier_exit)
3458                         return
3459
3460                 self._verifier_exit(verifier)
3461
3462         def _verifier_exit(self, verifier):
3463                 if verifier is not None and \
3464                         self._default_exit(verifier) != os.EX_OK:
3465                         self._unlock_builddir()
3466                         self.wait()
3467                         return
3468
3469                 logger = self.logger
3470                 pkg = self.pkg
3471                 pkg_count = self.pkg_count
3472                 pkg_path = self._pkg_path
3473
3474                 if self._fetched_pkg:
3475                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3476
3477                 if self.opts.fetchonly:
3478                         self._current_task = None
3479                         self.returncode = os.EX_OK
3480                         self.wait()
3481                         return
3482
3483                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3484                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3485                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3486                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3487                 logger.log(msg, short_msg=short_msg)
3488
3489                 phase = "clean"
3490                 settings = self.settings
3491                 ebuild_phase = EbuildPhase(background=self.background,
3492                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3493                         settings=settings, tree=self._tree)
3494
3495                 self._start_task(ebuild_phase, self._clean_exit)
3496
3497         def _clean_exit(self, clean_phase):
3498                 if self._default_exit(clean_phase) != os.EX_OK:
3499                         self._unlock_builddir()
3500                         self.wait()
3501                         return
3502
3503                 dir_path = self._build_dir.dir_path
3504
3505                 try:
3506                         shutil.rmtree(dir_path)
3507                 except (IOError, OSError), e:
3508                         if e.errno != errno.ENOENT:
3509                                 raise
3510                         del e
3511
3512                 infloc = self._infloc
3513                 pkg = self.pkg
3514                 pkg_path = self._pkg_path
3515
3516                 dir_mode = 0755
3517                 for mydir in (dir_path, self._image_dir, infloc):
3518                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3519                                 gid=portage.data.portage_gid, mode=dir_mode)
3520
3521                 # This initializes PORTAGE_LOG_FILE.
3522                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3523                 self._writemsg_level(">>> Extracting info\n")
3524
3525                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3526                 check_missing_metadata = ("CATEGORY", "PF")
3527                 missing_metadata = set()
3528                 for k in check_missing_metadata:
3529                         v = pkg_xpak.getfile(k)
3530                         if not v:
3531                                 missing_metadata.add(k)
3532
3533                 pkg_xpak.unpackinfo(infloc)
3534                 for k in missing_metadata:
3535                         if k == "CATEGORY":
3536                                 v = pkg.category
3537                         elif k == "PF":
3538                                 v = pkg.pf
3539                         else:
3540                                 continue
3541
3542                         f = open(os.path.join(infloc, k), 'wb')
3543                         try:
3544                                 f.write(v + "\n")
3545                         finally:
3546                                 f.close()
3547
3548                 # Store the md5sum in the vdb.
3549                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3550                 try:
3551                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3552                 finally:
3553                         f.close()
3554
3555                 # This gives bashrc users an opportunity to do various things
3556                 # such as remove binary packages after they're installed.
3557                 settings = self.settings
3558                 settings.setcpv(self.pkg)
3559                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3560                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3561
3562                 phase = "setup"
3563                 setup_phase = EbuildPhase(background=self.background,
3564                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3565                         settings=settings, tree=self._tree)
3566
3567                 setup_phase.addExitListener(self._setup_exit)
3568                 self._current_task = setup_phase
3569                 self.scheduler.scheduleSetup(setup_phase)
3570
3571         def _setup_exit(self, setup_phase):
3572                 if self._default_exit(setup_phase) != os.EX_OK:
3573                         self._unlock_builddir()
3574                         self.wait()
3575                         return
3576
3577                 extractor = BinpkgExtractorAsync(background=self.background,
3578                         image_dir=self._image_dir,
3579                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3580                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3581                 self._start_task(extractor, self._extractor_exit)
3582
3583         def _extractor_exit(self, extractor):
3584                 if self._final_exit(extractor) != os.EX_OK:
3585                         self._unlock_builddir()
3586                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3587                                 noiselevel=-1)
3588                 self.wait()
3589
3590         def _unlock_builddir(self):
3591                 if self.opts.pretend or self.opts.fetchonly:
3592                         return
3593                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3594                 self._build_dir.unlock()
3595
3596         def install(self):
3597
3598                 # This gives bashrc users an opportunity to do various things
3599                 # such as remove binary packages after they're installed.
3600                 settings = self.settings
3601                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3602                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3603
3604                 merge = EbuildMerge(find_blockers=self.find_blockers,
3605                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3606                         pkg=self.pkg, pkg_count=self.pkg_count,
3607                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3608                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3609
3610                 try:
3611                         retval = merge.execute()
3612                 finally:
3613                         settings.pop("PORTAGE_BINPKG_FILE", None)
3614                         self._unlock_builddir()
3615                 return retval
3616
3617 class BinpkgFetcher(SpawnProcess):
3618
3619         __slots__ = ("pkg", "pretend",
3620                 "locked", "pkg_path", "_lock_obj")
3621
3622         def __init__(self, **kwargs):
3623                 SpawnProcess.__init__(self, **kwargs)
3624                 pkg = self.pkg
3625                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3626
3627         def _start(self):
3628
3629                 if self.cancelled:
3630                         return
3631
3632                 pkg = self.pkg
3633                 pretend = self.pretend
3634                 bintree = pkg.root_config.trees["bintree"]
3635                 settings = bintree.settings
3636                 use_locks = "distlocks" in settings.features
3637                 pkg_path = self.pkg_path
3638
3639                 if not pretend:
3640                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3641                         if use_locks:
3642                                 self.lock()
3643                 exists = os.path.exists(pkg_path)
3644                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3645                 if not (pretend or resume):
3646                         # Remove existing file or broken symlink.
3647                         try:
3648                                 os.unlink(pkg_path)
3649                         except OSError:
3650                                 pass
3651
3652                 # urljoin doesn't work correctly with
3653                 # unrecognized protocols like sftp
3654                 if bintree._remote_has_index:
3655                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3656                         if not rel_uri:
3657                                 rel_uri = pkg.cpv + ".tbz2"
3658                         uri = bintree._remote_base_uri.rstrip("/") + \
3659                                 "/" + rel_uri.lstrip("/")
3660                 else:
3661                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3662                                 "/" + pkg.pf + ".tbz2"
3663
3664                 if pretend:
3665                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3666                         self.returncode = os.EX_OK
3667                         self.wait()
3668                         return
3669
3670                 protocol = urlparse.urlparse(uri)[0]
3671                 fcmd_prefix = "FETCHCOMMAND"
3672                 if resume:
3673                         fcmd_prefix = "RESUMECOMMAND"
3674                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3675                 if not fcmd:
3676                         fcmd = settings.get(fcmd_prefix)
3677
3678                 fcmd_vars = {
3679                         "DISTDIR" : os.path.dirname(pkg_path),
3680                         "URI"     : uri,
3681                         "FILE"    : os.path.basename(pkg_path)
3682                 }
3683
3684                 fetch_env = dict(settings.iteritems())
3685                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3686                         for x in shlex.split(fcmd)]
3687
3688                 if self.fd_pipes is None:
3689                         self.fd_pipes = {}
3690                 fd_pipes = self.fd_pipes
3691
3692                 # Redirect all output to stdout since some fetchers like
3693                 # wget pollute stderr (if portage detects a problem then it
3694                 # can send it's own message to stderr).
3695                 fd_pipes.setdefault(0, sys.stdin.fileno())
3696                 fd_pipes.setdefault(1, sys.stdout.fileno())
3697                 fd_pipes.setdefault(2, sys.stdout.fileno())
3698
3699                 self.args = fetch_args
3700                 self.env = fetch_env
3701                 SpawnProcess._start(self)
3702
3703         def _set_returncode(self, wait_retval):
3704                 SpawnProcess._set_returncode(self, wait_retval)
3705                 if self.returncode == os.EX_OK:
3706                         # If possible, update the mtime to match the remote package if
3707                         # the fetcher didn't already do it automatically.
3708                         bintree = self.pkg.root_config.trees["bintree"]
3709                         if bintree._remote_has_index:
3710                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3711                                 if remote_mtime is not None:
3712                                         try:
3713                                                 remote_mtime = long(remote_mtime)
3714                                         except ValueError:
3715                                                 pass
3716                                         else:
3717                                                 try:
3718                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3719                                                 except OSError:
3720                                                         pass
3721                                                 else:
3722                                                         if remote_mtime != local_mtime:
3723                                                                 try:
3724                                                                         os.utime(self.pkg_path,
3725                                                                                 (remote_mtime, remote_mtime))
3726                                                                 except OSError:
3727                                                                         pass
3728
3729                 if self.locked:
3730                         self.unlock()
3731
3732         def lock(self):
3733                 """
3734                 This raises an AlreadyLocked exception if lock() is called
3735                 while a lock is already held. In order to avoid this, call
3736                 unlock() or check whether the "locked" attribute is True
3737                 or False before calling lock().
3738                 """
3739                 if self._lock_obj is not None:
3740                         raise self.AlreadyLocked((self._lock_obj,))
3741
3742                 self._lock_obj = portage.locks.lockfile(
3743                         self.pkg_path, wantnewlockfile=1)
3744                 self.locked = True
3745
3746         class AlreadyLocked(portage.exception.PortageException):
3747                 pass
3748
3749         def unlock(self):
3750                 if self._lock_obj is None:
3751                         return
3752                 portage.locks.unlockfile(self._lock_obj)
3753                 self._lock_obj = None
3754                 self.locked = False
3755
3756 class BinpkgVerifier(AsynchronousTask):
3757         __slots__ = ("logfile", "pkg",)
3758
3759         def _start(self):
3760                 """
3761                 Note: Unlike a normal AsynchronousTask.start() method,
3762                 this one does all work is synchronously. The returncode
3763                 attribute will be set before it returns.
3764                 """
3765
3766                 pkg = self.pkg
3767                 root_config = pkg.root_config
3768                 bintree = root_config.trees["bintree"]
3769                 rval = os.EX_OK
3770                 stdout_orig = sys.stdout
3771                 stderr_orig = sys.stderr
3772                 log_file = None
3773                 if self.background and self.logfile is not None:
3774                         log_file = open(self.logfile, 'a')
3775                 try:
3776                         if log_file is not None:
3777                                 sys.stdout = log_file
3778                                 sys.stderr = log_file
3779                         try:
3780                                 bintree.digestCheck(pkg)
3781                         except portage.exception.FileNotFound:
3782                                 writemsg("!!! Fetching Binary failed " + \
3783                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3784                                 rval = 1
3785                         except portage.exception.DigestException, e:
3786                                 writemsg("\n!!! Digest verification failed:\n",
3787                                         noiselevel=-1)
3788                                 writemsg("!!! %s\n" % e.value[0],
3789                                         noiselevel=-1)
3790                                 writemsg("!!! Reason: %s\n" % e.value[1],
3791                                         noiselevel=-1)
3792                                 writemsg("!!! Got: %s\n" % e.value[2],
3793                                         noiselevel=-1)
3794                                 writemsg("!!! Expected: %s\n" % e.value[3],
3795                                         noiselevel=-1)
3796                                 rval = 1
3797                         if rval != os.EX_OK:
3798                                 pkg_path = bintree.getname(pkg.cpv)
3799                                 head, tail = os.path.split(pkg_path)
3800                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3801                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3802                                         noiselevel=-1)
3803                 finally:
3804                         sys.stdout = stdout_orig
3805                         sys.stderr = stderr_orig
3806                         if log_file is not None:
3807                                 log_file.close()
3808
3809                 self.returncode = rval
3810                 self.wait()
3811
3812 class BinpkgPrefetcher(CompositeTask):
3813
3814         __slots__ = ("pkg",) + \
3815                 ("pkg_path", "_bintree",)
3816
3817         def _start(self):
3818                 self._bintree = self.pkg.root_config.trees["bintree"]
3819                 fetcher = BinpkgFetcher(background=self.background,
3820                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3821                         scheduler=self.scheduler)
3822                 self.pkg_path = fetcher.pkg_path
3823                 self._start_task(fetcher, self._fetcher_exit)
3824
3825         def _fetcher_exit(self, fetcher):
3826
3827                 if self._default_exit(fetcher) != os.EX_OK:
3828                         self.wait()
3829                         return
3830
3831                 verifier = BinpkgVerifier(background=self.background,
3832                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3833                 self._start_task(verifier, self._verifier_exit)
3834
3835         def _verifier_exit(self, verifier):
3836                 if self._default_exit(verifier) != os.EX_OK:
3837                         self.wait()
3838                         return
3839
3840                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3841
3842                 self._current_task = None
3843                 self.returncode = os.EX_OK
3844                 self.wait()
3845
3846 class BinpkgExtractorAsync(SpawnProcess):
3847
3848         __slots__ = ("image_dir", "pkg", "pkg_path")
3849
3850         _shell_binary = portage.const.BASH_BINARY
3851
3852         def _start(self):
3853                 self.args = [self._shell_binary, "-c",
3854                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3855                         (portage._shell_quote(self.pkg_path),
3856                         portage._shell_quote(self.image_dir))]
3857
3858                 self.env = self.pkg.root_config.settings.environ()
3859                 SpawnProcess._start(self)
3860
3861 class MergeListItem(CompositeTask):
3862
3863         """
3864         TODO: For parallel scheduling, everything here needs asynchronous
3865         execution support (start, poll, and wait methods).
3866         """
3867
3868         __slots__ = ("args_set",
3869                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3870                 "find_blockers", "logger", "mtimedb", "pkg",
3871                 "pkg_count", "pkg_to_replace", "prefetcher",
3872                 "settings", "statusMessage", "world_atom") + \
3873                 ("_install_task",)
3874
3875         def _start(self):
3876
3877                 pkg = self.pkg
3878                 build_opts = self.build_opts
3879
3880                 if pkg.installed:
3881                         # uninstall,  executed by self.merge()
3882                         self.returncode = os.EX_OK
3883                         self.wait()
3884                         return
3885
3886                 args_set = self.args_set
3887                 find_blockers = self.find_blockers
3888                 logger = self.logger
3889                 mtimedb = self.mtimedb
3890                 pkg_count = self.pkg_count
3891                 scheduler = self.scheduler
3892                 settings = self.settings
3893                 world_atom = self.world_atom
3894                 ldpath_mtimes = mtimedb["ldpath"]
3895
3896                 action_desc = "Emerging"
3897                 preposition = "for"
3898                 if pkg.type_name == "binary":
3899                         action_desc += " binary"
3900
3901                 if build_opts.fetchonly:
3902                         action_desc = "Fetching"
3903
3904                 msg = "%s (%s of %s) %s" % \
3905                         (action_desc,
3906                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3907                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3908                         colorize("GOOD", pkg.cpv))
3909
3910                 portdb = pkg.root_config.trees["porttree"].dbapi
3911                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3912                 if portdir_repo_name:
3913                         pkg_repo_name = pkg.metadata.get("repository")
3914                         if pkg_repo_name != portdir_repo_name:
3915                                 if not pkg_repo_name:
3916                                         pkg_repo_name = "unknown repo"
3917                                 msg += " from %s" % pkg_repo_name
3918
3919                 if pkg.root != "/":
3920                         msg += " %s %s" % (preposition, pkg.root)
3921
3922                 if not build_opts.pretend:
3923                         self.statusMessage(msg)
3924                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3925                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3926
3927                 if pkg.type_name == "ebuild":
3928
3929                         build = EbuildBuild(args_set=args_set,
3930                                 background=self.background,
3931                                 config_pool=self.config_pool,
3932                                 find_blockers=find_blockers,
3933                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3934                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3935                                 prefetcher=self.prefetcher, scheduler=scheduler,
3936                                 settings=settings, world_atom=world_atom)
3937
3938                         self._install_task = build
3939                         self._start_task(build, self._default_final_exit)
3940                         return
3941
3942                 elif pkg.type_name == "binary":
3943
3944                         binpkg = Binpkg(background=self.background,
3945                                 find_blockers=find_blockers,
3946                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3947                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3948                                 prefetcher=self.prefetcher, settings=settings,
3949                                 scheduler=scheduler, world_atom=world_atom)
3950
3951                         self._install_task = binpkg
3952                         self._start_task(binpkg, self._default_final_exit)
3953                         return
3954
3955         def _poll(self):
3956                 self._install_task.poll()
3957                 return self.returncode
3958
3959         def _wait(self):
3960                 self._install_task.wait()
3961                 return self.returncode
3962
3963         def merge(self):
3964
3965                 pkg = self.pkg
3966                 build_opts = self.build_opts
3967                 find_blockers = self.find_blockers
3968                 logger = self.logger
3969                 mtimedb = self.mtimedb
3970                 pkg_count = self.pkg_count
3971                 prefetcher = self.prefetcher
3972                 scheduler = self.scheduler
3973                 settings = self.settings
3974                 world_atom = self.world_atom
3975                 ldpath_mtimes = mtimedb["ldpath"]
3976
3977                 if pkg.installed:
3978                         if not (build_opts.buildpkgonly or \
3979                                 build_opts.fetchonly or build_opts.pretend):
3980
3981                                 uninstall = PackageUninstall(background=self.background,
3982                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3983                                         pkg=pkg, scheduler=scheduler, settings=settings)
3984
3985                                 uninstall.start()
3986                                 retval = uninstall.wait()
3987                                 if retval != os.EX_OK:
3988                                         return retval
3989                         return os.EX_OK
3990
3991                 if build_opts.fetchonly or \
3992                         build_opts.buildpkgonly:
3993                         return self.returncode
3994
3995                 retval = self._install_task.install()
3996                 return retval
3997
3998 class PackageMerge(AsynchronousTask):
3999         """
4000         TODO: Implement asynchronous merge so that the scheduler can
4001         run while a merge is executing.
4002         """
4003
4004         __slots__ = ("merge",)
4005
4006         def _start(self):
4007
4008                 pkg = self.merge.pkg
4009                 pkg_count = self.merge.pkg_count
4010
4011                 if pkg.installed:
4012                         action_desc = "Uninstalling"
4013                         preposition = "from"
4014                 else:
4015                         action_desc = "Installing"
4016                         preposition = "to"
4017
4018                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4019
4020                 if pkg.root != "/":
4021                         msg += " %s %s" % (preposition, pkg.root)
4022
4023                 if not self.merge.build_opts.fetchonly and \
4024                         not self.merge.build_opts.pretend and \
4025                         not self.merge.build_opts.buildpkgonly:
4026                         self.merge.statusMessage(msg)
4027
4028                 self.returncode = self.merge.merge()
4029                 self.wait()
4030
4031 class DependencyArg(object):
4032         def __init__(self, arg=None, root_config=None):
4033                 self.arg = arg
4034                 self.root_config = root_config
4035
4036         def __str__(self):
4037                 return str(self.arg)
4038
4039 class AtomArg(DependencyArg):
4040         def __init__(self, atom=None, **kwargs):
4041                 DependencyArg.__init__(self, **kwargs)
4042                 self.atom = atom
4043                 if not isinstance(self.atom, portage.dep.Atom):
4044                         self.atom = portage.dep.Atom(self.atom)
4045                 self.set = (self.atom, )
4046
4047 class PackageArg(DependencyArg):
4048         def __init__(self, package=None, **kwargs):
4049                 DependencyArg.__init__(self, **kwargs)
4050                 self.package = package
4051                 self.atom = portage.dep.Atom("=" + package.cpv)
4052                 self.set = (self.atom, )
4053
4054 class SetArg(DependencyArg):
4055         def __init__(self, set=None, **kwargs):
4056                 DependencyArg.__init__(self, **kwargs)
4057                 self.set = set
4058                 self.name = self.arg[len(SETPREFIX):]
4059
4060 class Dependency(SlotObject):
4061         __slots__ = ("atom", "blocker", "depth",
4062                 "parent", "onlydeps", "priority", "root")
4063         def __init__(self, **kwargs):
4064                 SlotObject.__init__(self, **kwargs)
4065                 if self.priority is None:
4066                         self.priority = DepPriority()
4067                 if self.depth is None:
4068                         self.depth = 0
4069
4070 class BlockerCache(portage.cache.mappings.MutableMapping):
4071         """This caches blockers of installed packages so that dep_check does not
4072         have to be done for every single installed package on every invocation of
4073         emerge.  The cache is invalidated whenever it is detected that something
4074         has changed that might alter the results of dep_check() calls:
4075                 1) the set of installed packages (including COUNTER) has changed
4076                 2) the old-style virtuals have changed
4077         """
4078
4079         # Number of uncached packages to trigger cache update, since
4080         # it's wasteful to update it for every vdb change.
4081         _cache_threshold = 5
4082
4083         class BlockerData(object):
4084
4085                 __slots__ = ("__weakref__", "atoms", "counter")
4086
4087                 def __init__(self, counter, atoms):
4088                         self.counter = counter
4089                         self.atoms = atoms
4090
4091         def __init__(self, myroot, vardb):
4092                 self._vardb = vardb
4093                 self._virtuals = vardb.settings.getvirtuals()
4094                 self._cache_filename = os.path.join(myroot,
4095                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4096                 self._cache_version = "1"
4097                 self._cache_data = None
4098                 self._modified = set()
4099                 self._load()
4100
4101         def _load(self):
4102                 try:
4103                         f = open(self._cache_filename, mode='rb')
4104                         mypickle = pickle.Unpickler(f)
4105                         self._cache_data = mypickle.load()
4106                         f.close()
4107                         del f
4108                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4109                         if isinstance(e, pickle.UnpicklingError):
4110                                 writemsg("!!! Error loading '%s': %s\n" % \
4111                                         (self._cache_filename, str(e)), noiselevel=-1)
4112                         del e
4113
4114                 cache_valid = self._cache_data and \
4115                         isinstance(self._cache_data, dict) and \
4116                         self._cache_data.get("version") == self._cache_version and \
4117                         isinstance(self._cache_data.get("blockers"), dict)
4118                 if cache_valid:
4119                         # Validate all the atoms and counters so that
4120                         # corruption is detected as soon as possible.
4121                         invalid_items = set()
4122                         for k, v in self._cache_data["blockers"].iteritems():
4123                                 if not isinstance(k, basestring):
4124                                         invalid_items.add(k)
4125                                         continue
4126                                 try:
4127                                         if portage.catpkgsplit(k) is None:
4128                                                 invalid_items.add(k)
4129                                                 continue
4130                                 except portage.exception.InvalidData:
4131                                         invalid_items.add(k)
4132                                         continue
4133                                 if not isinstance(v, tuple) or \
4134                                         len(v) != 2:
4135                                         invalid_items.add(k)
4136                                         continue
4137                                 counter, atoms = v
4138                                 if not isinstance(counter, (int, long)):
4139                                         invalid_items.add(k)
4140                                         continue
4141                                 if not isinstance(atoms, (list, tuple)):
4142                                         invalid_items.add(k)
4143                                         continue
4144                                 invalid_atom = False
4145                                 for atom in atoms:
4146                                         if not isinstance(atom, basestring):
4147                                                 invalid_atom = True
4148                                                 break
4149                                         if atom[:1] != "!" or \
4150                                                 not portage.isvalidatom(
4151                                                 atom, allow_blockers=True):
4152                                                 invalid_atom = True
4153                                                 break
4154                                 if invalid_atom:
4155                                         invalid_items.add(k)
4156                                         continue
4157
4158                         for k in invalid_items:
4159                                 del self._cache_data["blockers"][k]
4160                         if not self._cache_data["blockers"]:
4161                                 cache_valid = False
4162
4163                 if not cache_valid:
4164                         self._cache_data = {"version":self._cache_version}
4165                         self._cache_data["blockers"] = {}
4166                         self._cache_data["virtuals"] = self._virtuals
4167                 self._modified.clear()
4168
4169         def flush(self):
4170                 """If the current user has permission and the internal blocker cache
4171                 been updated, save it to disk and mark it unmodified.  This is called
4172                 by emerge after it has proccessed blockers for all installed packages.
4173                 Currently, the cache is only written if the user has superuser
4174                 privileges (since that's required to obtain a lock), but all users
4175                 have read access and benefit from faster blocker lookups (as long as
4176                 the entire cache is still valid).  The cache is stored as a pickled
4177                 dict object with the following format:
4178
4179                 {
4180                         version : "1",
4181                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4182                         "virtuals" : vardb.settings.getvirtuals()
4183                 }
4184                 """
4185                 if len(self._modified) >= self._cache_threshold and \
4186                         secpass >= 2:
4187                         try:
4188                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4189                                 pickle.dump(self._cache_data, f, -1)
4190                                 f.close()
4191                                 portage.util.apply_secpass_permissions(
4192                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4193                         except (IOError, OSError), e:
4194                                 pass
4195                         self._modified.clear()
4196
4197         def __setitem__(self, cpv, blocker_data):
4198                 """
4199                 Update the cache and mark it as modified for a future call to
4200                 self.flush().
4201
4202                 @param cpv: Package for which to cache blockers.
4203                 @type cpv: String
4204                 @param blocker_data: An object with counter and atoms attributes.
4205                 @type blocker_data: BlockerData
4206                 """
4207                 self._cache_data["blockers"][cpv] = \
4208                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4209                 self._modified.add(cpv)
4210
4211         def __iter__(self):
4212                 if self._cache_data is None:
4213                         # triggered by python-trace
4214                         return iter([])
4215                 return iter(self._cache_data["blockers"])
4216
4217         def __delitem__(self, cpv):
4218                 del self._cache_data["blockers"][cpv]
4219
4220         def __getitem__(self, cpv):
4221                 """
4222                 @rtype: BlockerData
4223                 @returns: An object with counter and atoms attributes.
4224                 """
4225                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4226
4227 class BlockerDB(object):
4228
4229         def __init__(self, root_config):
4230                 self._root_config = root_config
4231                 self._vartree = root_config.trees["vartree"]
4232                 self._portdb = root_config.trees["porttree"].dbapi
4233
4234                 self._dep_check_trees = None
4235                 self._fake_vartree = None
4236
4237         def _get_fake_vartree(self, acquire_lock=0):
4238                 fake_vartree = self._fake_vartree
4239                 if fake_vartree is None:
4240                         fake_vartree = FakeVartree(self._root_config,
4241                                 acquire_lock=acquire_lock)
4242                         self._fake_vartree = fake_vartree
4243                         self._dep_check_trees = { self._vartree.root : {
4244                                 "porttree"    :  fake_vartree,
4245                                 "vartree"     :  fake_vartree,
4246                         }}
4247                 else:
4248                         fake_vartree.sync(acquire_lock=acquire_lock)
4249                 return fake_vartree
4250
4251         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4252                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4253                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4254                 settings = self._vartree.settings
4255                 stale_cache = set(blocker_cache)
4256                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4257                 dep_check_trees = self._dep_check_trees
4258                 vardb = fake_vartree.dbapi
4259                 installed_pkgs = list(vardb)
4260
4261                 for inst_pkg in installed_pkgs:
4262                         stale_cache.discard(inst_pkg.cpv)
4263                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4264                         if cached_blockers is not None and \
4265                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4266                                 cached_blockers = None
4267                         if cached_blockers is not None:
4268                                 blocker_atoms = cached_blockers.atoms
4269                         else:
4270                                 # Use aux_get() to trigger FakeVartree global
4271                                 # updates on *DEPEND when appropriate.
4272                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4273                                 try:
4274                                         portage.dep._dep_check_strict = False
4275                                         success, atoms = portage.dep_check(depstr,
4276                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4277                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4278                                 finally:
4279                                         portage.dep._dep_check_strict = True
4280                                 if not success:
4281                                         pkg_location = os.path.join(inst_pkg.root,
4282                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4283                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4284                                                 (pkg_location, atoms), noiselevel=-1)
4285                                         continue
4286
4287                                 blocker_atoms = [atom for atom in atoms \
4288                                         if atom.startswith("!")]
4289                                 blocker_atoms.sort()
4290                                 counter = long(inst_pkg.metadata["COUNTER"])
4291                                 blocker_cache[inst_pkg.cpv] = \
4292                                         blocker_cache.BlockerData(counter, blocker_atoms)
4293                 for cpv in stale_cache:
4294                         del blocker_cache[cpv]
4295                 blocker_cache.flush()
4296
4297                 blocker_parents = digraph()
4298                 blocker_atoms = []
4299                 for pkg in installed_pkgs:
4300                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4301                                 blocker_atom = blocker_atom.lstrip("!")
4302                                 blocker_atoms.append(blocker_atom)
4303                                 blocker_parents.add(blocker_atom, pkg)
4304
4305                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4306                 blocking_pkgs = set()
4307                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4308                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4309
4310                 # Check for blockers in the other direction.
4311                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4312                 try:
4313                         portage.dep._dep_check_strict = False
4314                         success, atoms = portage.dep_check(depstr,
4315                                 vardb, settings, myuse=new_pkg.use.enabled,
4316                                 trees=dep_check_trees, myroot=new_pkg.root)
4317                 finally:
4318                         portage.dep._dep_check_strict = True
4319                 if not success:
4320                         # We should never get this far with invalid deps.
4321                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4322                         assert False
4323
4324                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4325                         if atom[:1] == "!"]
4326                 if blocker_atoms:
4327                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4328                         for inst_pkg in installed_pkgs:
4329                                 try:
4330                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4331                                 except (portage.exception.InvalidDependString, StopIteration):
4332                                         continue
4333                                 blocking_pkgs.add(inst_pkg)
4334
4335                 return blocking_pkgs
4336
4337 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4338
4339         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4340                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4341         p_type, p_root, p_key, p_status = parent_node
4342         msg = []
4343         if p_status == "nomerge":
4344                 category, pf = portage.catsplit(p_key)
4345                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4346                 msg.append("Portage is unable to process the dependencies of the ")
4347                 msg.append("'%s' package. " % p_key)
4348                 msg.append("In order to correct this problem, the package ")
4349                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4350                 msg.append("As a temporary workaround, the --nodeps option can ")
4351                 msg.append("be used to ignore all dependencies.  For reference, ")
4352                 msg.append("the problematic dependencies can be found in the ")
4353                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4354         else:
4355                 msg.append("This package can not be installed. ")
4356                 msg.append("Please notify the '%s' package maintainer " % p_key)
4357                 msg.append("about this problem.")
4358
4359         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4360         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4361
4362 class PackageVirtualDbapi(portage.dbapi):
4363         """
4364         A dbapi-like interface class that represents the state of the installed
4365         package database as new packages are installed, replacing any packages
4366         that previously existed in the same slot. The main difference between
4367         this class and fakedbapi is that this one uses Package instances
4368         internally (passed in via cpv_inject() and cpv_remove() calls).
4369         """
4370         def __init__(self, settings):
4371                 portage.dbapi.__init__(self)
4372                 self.settings = settings
4373                 self._match_cache = {}
4374                 self._cp_map = {}
4375                 self._cpv_map = {}
4376
4377         def clear(self):
4378                 """
4379                 Remove all packages.
4380                 """
4381                 if self._cpv_map:
4382                         self._clear_cache()
4383                         self._cp_map.clear()
4384                         self._cpv_map.clear()
4385
4386         def copy(self):
4387                 obj = PackageVirtualDbapi(self.settings)
4388                 obj._match_cache = self._match_cache.copy()
4389                 obj._cp_map = self._cp_map.copy()
4390                 for k, v in obj._cp_map.iteritems():
4391                         obj._cp_map[k] = v[:]
4392                 obj._cpv_map = self._cpv_map.copy()
4393                 return obj
4394
4395         def __iter__(self):
4396                 return self._cpv_map.itervalues()
4397
4398         def __contains__(self, item):
4399                 existing = self._cpv_map.get(item.cpv)
4400                 if existing is not None and \
4401                         existing == item:
4402                         return True
4403                 return False
4404
4405         def get(self, item, default=None):
4406                 cpv = getattr(item, "cpv", None)
4407                 if cpv is None:
4408                         if len(item) != 4:
4409                                 return default
4410                         type_name, root, cpv, operation = item
4411
4412                 existing = self._cpv_map.get(cpv)
4413                 if existing is not None and \
4414                         existing == item:
4415                         return existing
4416                 return default
4417
4418         def match_pkgs(self, atom):
4419                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4420
4421         def _clear_cache(self):
4422                 if self._categories is not None:
4423                         self._categories = None
4424                 if self._match_cache:
4425                         self._match_cache = {}
4426
4427         def match(self, origdep, use_cache=1):
4428                 result = self._match_cache.get(origdep)
4429                 if result is not None:
4430                         return result[:]
4431                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4432                 self._match_cache[origdep] = result
4433                 return result[:]
4434
4435         def cpv_exists(self, cpv):
4436                 return cpv in self._cpv_map
4437
4438         def cp_list(self, mycp, use_cache=1):
4439                 cachelist = self._match_cache.get(mycp)
4440                 # cp_list() doesn't expand old-style virtuals
4441                 if cachelist and cachelist[0].startswith(mycp):
4442                         return cachelist[:]
4443                 cpv_list = self._cp_map.get(mycp)
4444                 if cpv_list is None:
4445                         cpv_list = []
4446                 else:
4447                         cpv_list = [pkg.cpv for pkg in cpv_list]
4448                 self._cpv_sort_ascending(cpv_list)
4449                 if not (not cpv_list and mycp.startswith("virtual/")):
4450                         self._match_cache[mycp] = cpv_list
4451                 return cpv_list[:]
4452
4453         def cp_all(self):
4454                 return list(self._cp_map)
4455
4456         def cpv_all(self):
4457                 return list(self._cpv_map)
4458
4459         def cpv_inject(self, pkg):
4460                 cp_list = self._cp_map.get(pkg.cp)
4461                 if cp_list is None:
4462                         cp_list = []
4463                         self._cp_map[pkg.cp] = cp_list
4464                 e_pkg = self._cpv_map.get(pkg.cpv)
4465                 if e_pkg is not None:
4466                         if e_pkg == pkg:
4467                                 return
4468                         self.cpv_remove(e_pkg)
4469                 for e_pkg in cp_list:
4470                         if e_pkg.slot_atom == pkg.slot_atom:
4471                                 if e_pkg == pkg:
4472                                         return
4473                                 self.cpv_remove(e_pkg)
4474                                 break
4475                 cp_list.append(pkg)
4476                 self._cpv_map[pkg.cpv] = pkg
4477                 self._clear_cache()
4478
4479         def cpv_remove(self, pkg):
4480                 old_pkg = self._cpv_map.get(pkg.cpv)
4481                 if old_pkg != pkg:
4482                         raise KeyError(pkg)
4483                 self._cp_map[pkg.cp].remove(pkg)
4484                 del self._cpv_map[pkg.cpv]
4485                 self._clear_cache()
4486
4487         def aux_get(self, cpv, wants):
4488                 metadata = self._cpv_map[cpv].metadata
4489                 return [metadata.get(x, "") for x in wants]
4490
4491         def aux_update(self, cpv, values):
4492                 self._cpv_map[cpv].metadata.update(values)
4493                 self._clear_cache()
4494
4495 class depgraph(object):
4496
4497         pkg_tree_map = RootConfig.pkg_tree_map
4498
4499         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4500
4501         def __init__(self, settings, trees, myopts, myparams, spinner):
4502                 self.settings = settings
4503                 self.target_root = settings["ROOT"]
4504                 self.myopts = myopts
4505                 self.myparams = myparams
4506                 self.edebug = 0
4507                 if settings.get("PORTAGE_DEBUG", "") == "1":
4508                         self.edebug = 1
4509                 self.spinner = spinner
4510                 self._running_root = trees["/"]["root_config"]
4511                 self._opts_no_restart = Scheduler._opts_no_restart
4512                 self.pkgsettings = {}
4513                 # Maps slot atom to package for each Package added to the graph.
4514                 self._slot_pkg_map = {}
4515                 # Maps nodes to the reasons they were selected for reinstallation.
4516                 self._reinstall_nodes = {}
4517                 self.mydbapi = {}
4518                 self.trees = {}
4519                 self._trees_orig = trees
4520                 self.roots = {}
4521                 # Contains a filtered view of preferred packages that are selected
4522                 # from available repositories.
4523                 self._filtered_trees = {}
4524                 # Contains installed packages and new packages that have been added
4525                 # to the graph.
4526                 self._graph_trees = {}
4527                 # All Package instances
4528                 self._pkg_cache = {}
4529                 for myroot in trees:
4530                         self.trees[myroot] = {}
4531                         # Create a RootConfig instance that references
4532                         # the FakeVartree instead of the real one.
4533                         self.roots[myroot] = RootConfig(
4534                                 trees[myroot]["vartree"].settings,
4535                                 self.trees[myroot],
4536                                 trees[myroot]["root_config"].setconfig)
4537                         for tree in ("porttree", "bintree"):
4538                                 self.trees[myroot][tree] = trees[myroot][tree]
4539                         self.trees[myroot]["vartree"] = \
4540                                 FakeVartree(trees[myroot]["root_config"],
4541                                         pkg_cache=self._pkg_cache)
4542                         self.pkgsettings[myroot] = portage.config(
4543                                 clone=self.trees[myroot]["vartree"].settings)
4544                         self._slot_pkg_map[myroot] = {}
4545                         vardb = self.trees[myroot]["vartree"].dbapi
4546                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4547                                 "--buildpkgonly" not in self.myopts
4548                         # This fakedbapi instance will model the state that the vdb will
4549                         # have after new packages have been installed.
4550                         fakedb = PackageVirtualDbapi(vardb.settings)
4551                         if preload_installed_pkgs:
4552                                 for pkg in vardb:
4553                                         self.spinner.update()
4554                                         # This triggers metadata updates via FakeVartree.
4555                                         vardb.aux_get(pkg.cpv, [])
4556                                         fakedb.cpv_inject(pkg)
4557
4558                         # Now that the vardb state is cached in our FakeVartree,
4559                         # we won't be needing the real vartree cache for awhile.
4560                         # To make some room on the heap, clear the vardbapi
4561                         # caches.
4562                         trees[myroot]["vartree"].dbapi._clear_cache()
4563                         gc.collect()
4564
4565                         self.mydbapi[myroot] = fakedb
4566                         def graph_tree():
4567                                 pass
4568                         graph_tree.dbapi = fakedb
4569                         self._graph_trees[myroot] = {}
4570                         self._filtered_trees[myroot] = {}
4571                         # Substitute the graph tree for the vartree in dep_check() since we
4572                         # want atom selections to be consistent with package selections
4573                         # have already been made.
4574                         self._graph_trees[myroot]["porttree"]   = graph_tree
4575                         self._graph_trees[myroot]["vartree"]    = graph_tree
4576                         def filtered_tree():
4577                                 pass
4578                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4579                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4580
4581                         # Passing in graph_tree as the vartree here could lead to better
4582                         # atom selections in some cases by causing atoms for packages that
4583                         # have been added to the graph to be preferred over other choices.
4584                         # However, it can trigger atom selections that result in
4585                         # unresolvable direct circular dependencies. For example, this
4586                         # happens with gwydion-dylan which depends on either itself or
4587                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4588                         # gwydion-dylan-bin needs to be selected in order to avoid a
4589                         # an unresolvable direct circular dependency.
4590                         #
4591                         # To solve the problem described above, pass in "graph_db" so that
4592                         # packages that have been added to the graph are distinguishable
4593                         # from other available packages and installed packages. Also, pass
4594                         # the parent package into self._select_atoms() calls so that
4595                         # unresolvable direct circular dependencies can be detected and
4596                         # avoided when possible.
4597                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4598                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4599
4600                         dbs = []
4601                         portdb = self.trees[myroot]["porttree"].dbapi
4602                         bindb  = self.trees[myroot]["bintree"].dbapi
4603                         vardb  = self.trees[myroot]["vartree"].dbapi
4604                         #               (db, pkg_type, built, installed, db_keys)
4605                         if "--usepkgonly" not in self.myopts:
4606                                 db_keys = list(portdb._aux_cache_keys)
4607                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4608                         if "--usepkg" in self.myopts:
4609                                 db_keys = list(bindb._aux_cache_keys)
4610                                 dbs.append((bindb,  "binary", True, False, db_keys))
4611                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4612                         dbs.append((vardb, "installed", True, True, db_keys))
4613                         self._filtered_trees[myroot]["dbs"] = dbs
4614                         if "--usepkg" in self.myopts:
4615                                 self.trees[myroot]["bintree"].populate(
4616                                         "--getbinpkg" in self.myopts,
4617                                         "--getbinpkgonly" in self.myopts)
4618                 del trees
4619
4620                 self.digraph=portage.digraph()
4621                 # contains all sets added to the graph
4622                 self._sets = {}
4623                 # contains atoms given as arguments
4624                 self._sets["args"] = InternalPackageSet()
4625                 # contains all atoms from all sets added to the graph, including
4626                 # atoms given as arguments
4627                 self._set_atoms = InternalPackageSet()
4628                 self._atom_arg_map = {}
4629                 # contains all nodes pulled in by self._set_atoms
4630                 self._set_nodes = set()
4631                 # Contains only Blocker -> Uninstall edges
4632                 self._blocker_uninstalls = digraph()
4633                 # Contains only Package -> Blocker edges
4634                 self._blocker_parents = digraph()
4635                 # Contains only irrelevant Package -> Blocker edges
4636                 self._irrelevant_blockers = digraph()
4637                 # Contains only unsolvable Package -> Blocker edges
4638                 self._unsolvable_blockers = digraph()
4639                 # Contains all Blocker -> Blocked Package edges
4640                 self._blocked_pkgs = digraph()
4641                 # Contains world packages that have been protected from
4642                 # uninstallation but may not have been added to the graph
4643                 # if the graph is not complete yet.
4644                 self._blocked_world_pkgs = {}
4645                 self._slot_collision_info = {}
4646                 # Slot collision nodes are not allowed to block other packages since
4647                 # blocker validation is only able to account for one package per slot.
4648                 self._slot_collision_nodes = set()
4649                 self._parent_atoms = {}
4650                 self._slot_conflict_parent_atoms = set()
4651                 self._serialized_tasks_cache = None
4652                 self._scheduler_graph = None
4653                 self._displayed_list = None
4654                 self._pprovided_args = []
4655                 self._missing_args = []
4656                 self._masked_installed = set()
4657                 self._unsatisfied_deps_for_display = []
4658                 self._unsatisfied_blockers_for_display = None
4659                 self._circular_deps_for_display = None
4660                 self._dep_stack = []
4661                 self._unsatisfied_deps = []
4662                 self._initially_unsatisfied_deps = []
4663                 self._ignored_deps = []
4664                 self._required_set_names = set(["system", "world"])
4665                 self._select_atoms = self._select_atoms_highest_available
4666                 self._select_package = self._select_pkg_highest_available
4667                 self._highest_pkg_cache = {}
4668
4669         def _show_slot_collision_notice(self):
4670                 """Show an informational message advising the user to mask one of the
4671                 the packages. In some cases it may be possible to resolve this
4672                 automatically, but support for backtracking (removal nodes that have
4673                 already been selected) will be required in order to handle all possible
4674                 cases.
4675                 """
4676
4677                 if not self._slot_collision_info:
4678                         return
4679
4680                 self._show_merge_list()
4681
4682                 msg = []
4683                 msg.append("\n!!! Multiple package instances within a single " + \
4684                         "package slot have been pulled\n")
4685                 msg.append("!!! into the dependency graph, resulting" + \
4686                         " in a slot conflict:\n\n")
4687                 indent = "  "
4688                 # Max number of parents shown, to avoid flooding the display.
4689                 max_parents = 3
4690                 explanation_columns = 70
4691                 explanations = 0
4692                 for (slot_atom, root), slot_nodes \
4693                         in self._slot_collision_info.iteritems():
4694                         msg.append(str(slot_atom))
4695                         msg.append("\n\n")
4696
4697                         for node in slot_nodes:
4698                                 msg.append(indent)
4699                                 msg.append(str(node))
4700                                 parent_atoms = self._parent_atoms.get(node)
4701                                 if parent_atoms:
4702                                         pruned_list = set()
4703                                         # Prefer conflict atoms over others.
4704                                         for parent_atom in parent_atoms:
4705                                                 if len(pruned_list) >= max_parents:
4706                                                         break
4707                                                 if parent_atom in self._slot_conflict_parent_atoms:
4708                                                         pruned_list.add(parent_atom)
4709
4710                                         # If this package was pulled in by conflict atoms then
4711                                         # show those alone since those are the most interesting.
4712                                         if not pruned_list:
4713                                                 # When generating the pruned list, prefer instances
4714                                                 # of DependencyArg over instances of Package.
4715                                                 for parent_atom in parent_atoms:
4716                                                         if len(pruned_list) >= max_parents:
4717                                                                 break
4718                                                         parent, atom = parent_atom
4719                                                         if isinstance(parent, DependencyArg):
4720                                                                 pruned_list.add(parent_atom)
4721                                                 # Prefer Packages instances that themselves have been
4722                                                 # pulled into collision slots.
4723                                                 for parent_atom in parent_atoms:
4724                                                         if len(pruned_list) >= max_parents:
4725                                                                 break
4726                                                         parent, atom = parent_atom
4727                                                         if isinstance(parent, Package) and \
4728                                                                 (parent.slot_atom, parent.root) \
4729                                                                 in self._slot_collision_info:
4730                                                                 pruned_list.add(parent_atom)
4731                                                 for parent_atom in parent_atoms:
4732                                                         if len(pruned_list) >= max_parents:
4733                                                                 break
4734                                                         pruned_list.add(parent_atom)
4735                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4736                                         parent_atoms = pruned_list
4737                                         msg.append(" pulled in by\n")
4738                                         for parent_atom in parent_atoms:
4739                                                 parent, atom = parent_atom
4740                                                 msg.append(2*indent)
4741                                                 if isinstance(parent,
4742                                                         (PackageArg, AtomArg)):
4743                                                         # For PackageArg and AtomArg types, it's
4744                                                         # redundant to display the atom attribute.
4745                                                         msg.append(str(parent))
4746                                                 else:
4747                                                         # Display the specific atom from SetArg or
4748                                                         # Package types.
4749                                                         msg.append("%s required by %s" % (atom, parent))
4750                                                 msg.append("\n")
4751                                         if omitted_parents:
4752                                                 msg.append(2*indent)
4753                                                 msg.append("(and %d more)\n" % omitted_parents)
4754                                 else:
4755                                         msg.append(" (no parents)\n")
4756                                 msg.append("\n")
4757                         explanation = self._slot_conflict_explanation(slot_nodes)
4758                         if explanation:
4759                                 explanations += 1
4760                                 msg.append(indent + "Explanation:\n\n")
4761                                 for line in textwrap.wrap(explanation, explanation_columns):
4762                                         msg.append(2*indent + line + "\n")
4763                                 msg.append("\n")
4764                 msg.append("\n")
4765                 sys.stderr.write("".join(msg))
4766                 sys.stderr.flush()
4767
4768                 explanations_for_all = explanations == len(self._slot_collision_info)
4769
4770                 if explanations_for_all or "--quiet" in self.myopts:
4771                         return
4772
4773                 msg = []
4774                 msg.append("It may be possible to solve this problem ")
4775                 msg.append("by using package.mask to prevent one of ")
4776                 msg.append("those packages from being selected. ")
4777                 msg.append("However, it is also possible that conflicting ")
4778                 msg.append("dependencies exist such that they are impossible to ")
4779                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4780                 msg.append("the dependencies of two different packages, then those ")
4781                 msg.append("packages can not be installed simultaneously.")
4782
4783                 from formatter import AbstractFormatter, DumbWriter
4784                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4785                 for x in msg:
4786                         f.add_flowing_data(x)
4787                 f.end_paragraph(1)
4788
4789                 msg = []
4790                 msg.append("For more information, see MASKED PACKAGES ")
4791                 msg.append("section in the emerge man page or refer ")
4792                 msg.append("to the Gentoo Handbook.")
4793                 for x in msg:
4794                         f.add_flowing_data(x)
4795                 f.end_paragraph(1)
4796                 f.writer.flush()
4797
4798         def _slot_conflict_explanation(self, slot_nodes):
4799                 """
4800                 When a slot conflict occurs due to USE deps, there are a few
4801                 different cases to consider:
4802
4803                 1) New USE are correctly set but --newuse wasn't requested so an
4804                    installed package with incorrect USE happened to get pulled
4805                    into graph before the new one.
4806
4807                 2) New USE are incorrectly set but an installed package has correct
4808                    USE so it got pulled into the graph, and a new instance also got
4809                    pulled in due to --newuse or an upgrade.
4810
4811                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4812                    and multiple package instances got pulled into the same slot to
4813                    satisfy the conflicting deps.
4814
4815                 Currently, explanations and suggested courses of action are generated
4816                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4817                 """
4818
4819                 if len(slot_nodes) != 2:
4820                         # Suggestions are only implemented for
4821                         # conflicts between two packages.
4822                         return None
4823
4824                 all_conflict_atoms = self._slot_conflict_parent_atoms
4825                 matched_node = None
4826                 matched_atoms = None
4827                 unmatched_node = None
4828                 for node in slot_nodes:
4829                         parent_atoms = self._parent_atoms.get(node)
4830                         if not parent_atoms:
4831                                 # Normally, there are always parent atoms. If there are
4832                                 # none then something unexpected is happening and there's
4833                                 # currently no suggestion for this case.
4834                                 return None
4835                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4836                         for parent_atom in conflict_atoms:
4837                                 parent, atom = parent_atom
4838                                 if not atom.use:
4839                                         # Suggestions are currently only implemented for cases
4840                                         # in which all conflict atoms have USE deps.
4841                                         return None
4842                         if conflict_atoms:
4843                                 if matched_node is not None:
4844                                         # If conflict atoms match multiple nodes
4845                                         # then there's no suggestion.
4846                                         return None
4847                                 matched_node = node
4848                                 matched_atoms = conflict_atoms
4849                         else:
4850                                 if unmatched_node is not None:
4851                                         # Neither node is matched by conflict atoms, and
4852                                         # there is no suggestion for this case.
4853                                         return None
4854                                 unmatched_node = node
4855
4856                 if matched_node is None or unmatched_node is None:
4857                         # This shouldn't happen.
4858                         return None
4859
4860                 if unmatched_node.installed and not matched_node.installed:
4861                         return "New USE are correctly set, but --newuse wasn't" + \
4862                                 " requested, so an installed package with incorrect USE " + \
4863                                 "happened to get pulled into the dependency graph. " + \
4864                                 "In order to solve " + \
4865                                 "this, either specify the --newuse option or explicitly " + \
4866                                 " reinstall '%s'." % matched_node.slot_atom
4867
4868                 if matched_node.installed and not unmatched_node.installed:
4869                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4870                         explanation = ("New USE for '%s' are incorrectly set. " + \
4871                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4872                                 (matched_node.slot_atom, atoms[0])
4873                         if len(atoms) > 1:
4874                                 for atom in atoms[1:-1]:
4875                                         explanation += ", '%s'" % (atom,)
4876                                 if len(atoms) > 2:
4877                                         explanation += ","
4878                                 explanation += " and '%s'" % (atoms[-1],)
4879                         explanation += "."
4880                         return explanation
4881
4882                 return None
4883
4884         def _process_slot_conflicts(self):
4885                 """
4886                 Process slot conflict data to identify specific atoms which
4887                 lead to conflict. These atoms only match a subset of the
4888                 packages that have been pulled into a given slot.
4889                 """
4890                 for (slot_atom, root), slot_nodes \
4891                         in self._slot_collision_info.iteritems():
4892
4893                         all_parent_atoms = set()
4894                         for pkg in slot_nodes:
4895                                 parent_atoms = self._parent_atoms.get(pkg)
4896                                 if not parent_atoms:
4897                                         continue
4898                                 all_parent_atoms.update(parent_atoms)
4899
4900                         for pkg in slot_nodes:
4901                                 parent_atoms = self._parent_atoms.get(pkg)
4902                                 if parent_atoms is None:
4903                                         parent_atoms = set()
4904                                         self._parent_atoms[pkg] = parent_atoms
4905                                 for parent_atom in all_parent_atoms:
4906                                         if parent_atom in parent_atoms:
4907                                                 continue
4908                                         # Use package set for matching since it will match via
4909                                         # PROVIDE when necessary, while match_from_list does not.
4910                                         parent, atom = parent_atom
4911                                         atom_set = InternalPackageSet(
4912                                                 initial_atoms=(atom,))
4913                                         if atom_set.findAtomForPackage(pkg):
4914                                                 parent_atoms.add(parent_atom)
4915                                         else:
4916                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4917
4918         def _reinstall_for_flags(self, forced_flags,
4919                 orig_use, orig_iuse, cur_use, cur_iuse):
4920                 """Return a set of flags that trigger reinstallation, or None if there
4921                 are no such flags."""
4922                 if "--newuse" in self.myopts:
4923                         flags = set(orig_iuse.symmetric_difference(
4924                                 cur_iuse).difference(forced_flags))
4925                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4926                                 cur_iuse.intersection(cur_use)))
4927                         if flags:
4928                                 return flags
4929                 elif "changed-use" == self.myopts.get("--reinstall"):
4930                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4931                                 cur_iuse.intersection(cur_use))
4932                         if flags:
4933                                 return flags
4934                 return None
4935
4936         def _create_graph(self, allow_unsatisfied=False):
4937                 dep_stack = self._dep_stack
4938                 while dep_stack:
4939                         self.spinner.update()
4940                         dep = dep_stack.pop()
4941                         if isinstance(dep, Package):
4942                                 if not self._add_pkg_deps(dep,
4943                                         allow_unsatisfied=allow_unsatisfied):
4944                                         return 0
4945                                 continue
4946                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4947                                 return 0
4948                 return 1
4949
4950         def _add_dep(self, dep, allow_unsatisfied=False):
4951                 debug = "--debug" in self.myopts
4952                 buildpkgonly = "--buildpkgonly" in self.myopts
4953                 nodeps = "--nodeps" in self.myopts
4954                 empty = "empty" in self.myparams
4955                 deep = "deep" in self.myparams
4956                 update = "--update" in self.myopts and dep.depth <= 1
4957                 if dep.blocker:
4958                         if not buildpkgonly and \
4959                                 not nodeps and \
4960                                 dep.parent not in self._slot_collision_nodes:
4961                                 if dep.parent.onlydeps:
4962                                         # It's safe to ignore blockers if the
4963                                         # parent is an --onlydeps node.
4964                                         return 1
4965                                 # The blocker applies to the root where
4966                                 # the parent is or will be installed.
4967                                 blocker = Blocker(atom=dep.atom,
4968                                         eapi=dep.parent.metadata["EAPI"],
4969                                         root=dep.parent.root)
4970                                 self._blocker_parents.add(blocker, dep.parent)
4971                         return 1
4972                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4973                         onlydeps=dep.onlydeps)
4974                 if not dep_pkg:
4975                         if dep.priority.optional:
4976                                 # This could be an unecessary build-time dep
4977                                 # pulled in by --with-bdeps=y.
4978                                 return 1
4979                         if allow_unsatisfied:
4980                                 self._unsatisfied_deps.append(dep)
4981                                 return 1
4982                         self._unsatisfied_deps_for_display.append(
4983                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4984                         return 0
4985                 # In some cases, dep_check will return deps that shouldn't
4986                 # be proccessed any further, so they are identified and
4987                 # discarded here. Try to discard as few as possible since
4988                 # discarded dependencies reduce the amount of information
4989                 # available for optimization of merge order.
4990                 if dep.priority.satisfied and \
4991                         not dep_pkg.installed and \
4992                         not (existing_node or empty or deep or update):
4993                         myarg = None
4994                         if dep.root == self.target_root:
4995                                 try:
4996                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4997                                 except StopIteration:
4998                                         pass
4999                                 except portage.exception.InvalidDependString:
5000                                         if not dep_pkg.installed:
5001                                                 # This shouldn't happen since the package
5002                                                 # should have been masked.
5003                                                 raise
5004                         if not myarg:
5005                                 self._ignored_deps.append(dep)
5006                                 return 1
5007
5008                 if not self._add_pkg(dep_pkg, dep):
5009                         return 0
5010                 return 1
5011
5012         def _add_pkg(self, pkg, dep):
5013                 myparent = None
5014                 priority = None
5015                 depth = 0
5016                 if dep is None:
5017                         dep = Dependency()
5018                 else:
5019                         myparent = dep.parent
5020                         priority = dep.priority
5021                         depth = dep.depth
5022                 if priority is None:
5023                         priority = DepPriority()
5024                 """
5025                 Fills the digraph with nodes comprised of packages to merge.
5026                 mybigkey is the package spec of the package to merge.
5027                 myparent is the package depending on mybigkey ( or None )
5028                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5029                         Think --onlydeps, we need to ignore packages in that case.
5030                 #stuff to add:
5031                 #SLOT-aware emerge
5032                 #IUSE-aware emerge -> USE DEP aware depgraph
5033                 #"no downgrade" emerge
5034                 """
5035                 # Ensure that the dependencies of the same package
5036                 # are never processed more than once.
5037                 previously_added = pkg in self.digraph
5038
5039                 # select the correct /var database that we'll be checking against
5040                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5041                 pkgsettings = self.pkgsettings[pkg.root]
5042
5043                 arg_atoms = None
5044                 if True:
5045                         try:
5046                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5047                         except portage.exception.InvalidDependString, e:
5048                                 if not pkg.installed:
5049                                         show_invalid_depstring_notice(
5050                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5051                                         return 0
5052                                 del e
5053
5054                 if not pkg.onlydeps:
5055                         if not pkg.installed and \
5056                                 "empty" not in self.myparams and \
5057                                 vardbapi.match(pkg.slot_atom):
5058                                 # Increase the priority of dependencies on packages that
5059                                 # are being rebuilt. This optimizes merge order so that
5060                                 # dependencies are rebuilt/updated as soon as possible,
5061                                 # which is needed especially when emerge is called by
5062                                 # revdep-rebuild since dependencies may be affected by ABI
5063                                 # breakage that has rendered them useless. Don't adjust
5064                                 # priority here when in "empty" mode since all packages
5065                                 # are being merged in that case.
5066                                 priority.rebuild = True
5067
5068                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5069                         slot_collision = False
5070                         if existing_node:
5071                                 existing_node_matches = pkg.cpv == existing_node.cpv
5072                                 if existing_node_matches and \
5073                                         pkg != existing_node and \
5074                                         dep.atom is not None:
5075                                         # Use package set for matching since it will match via
5076                                         # PROVIDE when necessary, while match_from_list does not.
5077                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5078                                         if not atom_set.findAtomForPackage(existing_node):
5079                                                 existing_node_matches = False
5080                                 if existing_node_matches:
5081                                         # The existing node can be reused.
5082                                         if arg_atoms:
5083                                                 for parent_atom in arg_atoms:
5084                                                         parent, atom = parent_atom
5085                                                         self.digraph.add(existing_node, parent,
5086                                                                 priority=priority)
5087                                                         self._add_parent_atom(existing_node, parent_atom)
5088                                         # If a direct circular dependency is not an unsatisfied
5089                                         # buildtime dependency then drop it here since otherwise
5090                                         # it can skew the merge order calculation in an unwanted
5091                                         # way.
5092                                         if existing_node != myparent or \
5093                                                 (priority.buildtime and not priority.satisfied):
5094                                                 self.digraph.addnode(existing_node, myparent,
5095                                                         priority=priority)
5096                                                 if dep.atom is not None and dep.parent is not None:
5097                                                         self._add_parent_atom(existing_node,
5098                                                                 (dep.parent, dep.atom))
5099                                         return 1
5100                                 else:
5101
5102                                         # A slot collision has occurred.  Sometimes this coincides
5103                                         # with unresolvable blockers, so the slot collision will be
5104                                         # shown later if there are no unresolvable blockers.
5105                                         self._add_slot_conflict(pkg)
5106                                         slot_collision = True
5107
5108                         if slot_collision:
5109                                 # Now add this node to the graph so that self.display()
5110                                 # can show use flags and --tree portage.output.  This node is
5111                                 # only being partially added to the graph.  It must not be
5112                                 # allowed to interfere with the other nodes that have been
5113                                 # added.  Do not overwrite data for existing nodes in
5114                                 # self.mydbapi since that data will be used for blocker
5115                                 # validation.
5116                                 # Even though the graph is now invalid, continue to process
5117                                 # dependencies so that things like --fetchonly can still
5118                                 # function despite collisions.
5119                                 pass
5120                         elif not previously_added:
5121                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5122                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5123                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5124
5125                         if not pkg.installed:
5126                                 # Allow this package to satisfy old-style virtuals in case it
5127                                 # doesn't already. Any pre-existing providers will be preferred
5128                                 # over this one.
5129                                 try:
5130                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5131                                         # For consistency, also update the global virtuals.
5132                                         settings = self.roots[pkg.root].settings
5133                                         settings.unlock()
5134                                         settings.setinst(pkg.cpv, pkg.metadata)
5135                                         settings.lock()
5136                                 except portage.exception.InvalidDependString, e:
5137                                         show_invalid_depstring_notice(
5138                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5139                                         del e
5140                                         return 0
5141
5142                 if arg_atoms:
5143                         self._set_nodes.add(pkg)
5144
5145                 # Do this even when addme is False (--onlydeps) so that the
5146                 # parent/child relationship is always known in case
5147                 # self._show_slot_collision_notice() needs to be called later.
5148                 self.digraph.add(pkg, myparent, priority=priority)
5149                 if dep.atom is not None and dep.parent is not None:
5150                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5151
5152                 if arg_atoms:
5153                         for parent_atom in arg_atoms:
5154                                 parent, atom = parent_atom
5155                                 self.digraph.add(pkg, parent, priority=priority)
5156                                 self._add_parent_atom(pkg, parent_atom)
5157
5158                 """ This section determines whether we go deeper into dependencies or not.
5159                     We want to go deeper on a few occasions:
5160                     Installing package A, we need to make sure package A's deps are met.
5161                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5162                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5163                 """
5164                 dep_stack = self._dep_stack
5165                 if "recurse" not in self.myparams:
5166                         return 1
5167                 elif pkg.installed and \
5168                         "deep" not in self.myparams:
5169                         dep_stack = self._ignored_deps
5170
5171                 self.spinner.update()
5172
5173                 if arg_atoms:
5174                         depth = 0
5175                 pkg.depth = depth
5176                 if not previously_added:
5177                         dep_stack.append(pkg)
5178                 return 1
5179
5180         def _add_parent_atom(self, pkg, parent_atom):
5181                 parent_atoms = self._parent_atoms.get(pkg)
5182                 if parent_atoms is None:
5183                         parent_atoms = set()
5184                         self._parent_atoms[pkg] = parent_atoms
5185                 parent_atoms.add(parent_atom)
5186
5187         def _add_slot_conflict(self, pkg):
5188                 self._slot_collision_nodes.add(pkg)
5189                 slot_key = (pkg.slot_atom, pkg.root)
5190                 slot_nodes = self._slot_collision_info.get(slot_key)
5191                 if slot_nodes is None:
5192                         slot_nodes = set()
5193                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5194                         self._slot_collision_info[slot_key] = slot_nodes
5195                 slot_nodes.add(pkg)
5196
5197         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5198
5199                 mytype = pkg.type_name
5200                 myroot = pkg.root
5201                 mykey = pkg.cpv
5202                 metadata = pkg.metadata
5203                 myuse = pkg.use.enabled
5204                 jbigkey = pkg
5205                 depth = pkg.depth + 1
5206                 removal_action = "remove" in self.myparams
5207
5208                 edepend={}
5209                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5210                 for k in depkeys:
5211                         edepend[k] = metadata[k]
5212
5213                 if not pkg.built and \
5214                         "--buildpkgonly" in self.myopts and \
5215                         "deep" not in self.myparams and \
5216                         "empty" not in self.myparams:
5217                         edepend["RDEPEND"] = ""
5218                         edepend["PDEPEND"] = ""
5219                 bdeps_optional = False
5220
5221                 if pkg.built and not removal_action:
5222                         if self.myopts.get("--with-bdeps", "n") == "y":
5223                                 # Pull in build time deps as requested, but marked them as
5224                                 # "optional" since they are not strictly required. This allows
5225                                 # more freedom in the merge order calculation for solving
5226                                 # circular dependencies. Don't convert to PDEPEND since that
5227                                 # could make --with-bdeps=y less effective if it is used to
5228                                 # adjust merge order to prevent built_with_use() calls from
5229                                 # failing.
5230                                 bdeps_optional = True
5231                         else:
5232                                 # built packages do not have build time dependencies.
5233                                 edepend["DEPEND"] = ""
5234
5235                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5236                         edepend["DEPEND"] = ""
5237
5238                 deps = (
5239                         ("/", edepend["DEPEND"],
5240                                 self._priority(buildtime=(not bdeps_optional),
5241                                 optional=bdeps_optional)),
5242                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5243                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5244                 )
5245
5246                 debug = "--debug" in self.myopts
5247                 strict = mytype != "installed"
5248                 try:
5249                         for dep_root, dep_string, dep_priority in deps:
5250                                 if not dep_string:
5251                                         continue
5252                                 if debug:
5253                                         print
5254                                         print "Parent:   ", jbigkey
5255                                         print "Depstring:", dep_string
5256                                         print "Priority:", dep_priority
5257                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5258                                 try:
5259                                         selected_atoms = self._select_atoms(dep_root,
5260                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5261                                                 priority=dep_priority)
5262                                 except portage.exception.InvalidDependString, e:
5263                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5264                                         return 0
5265                                 if debug:
5266                                         print "Candidates:", selected_atoms
5267
5268                                 for atom in selected_atoms:
5269                                         try:
5270
5271                                                 atom = portage.dep.Atom(atom)
5272
5273                                                 mypriority = dep_priority.copy()
5274                                                 if not atom.blocker and vardb.match(atom):
5275                                                         mypriority.satisfied = True
5276
5277                                                 if not self._add_dep(Dependency(atom=atom,
5278                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5279                                                         priority=mypriority, root=dep_root),
5280                                                         allow_unsatisfied=allow_unsatisfied):
5281                                                         return 0
5282
5283                                         except portage.exception.InvalidAtom, e:
5284                                                 show_invalid_depstring_notice(
5285                                                         pkg, dep_string, str(e))
5286                                                 del e
5287                                                 if not pkg.installed:
5288                                                         return 0
5289
5290                                 if debug:
5291                                         print "Exiting...", jbigkey
5292                 except portage.exception.AmbiguousPackageName, e:
5293                         pkgs = e.args[0]
5294                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5295                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5296                         for cpv in pkgs:
5297                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5298                         portage.writemsg("\n", noiselevel=-1)
5299                         if mytype == "binary":
5300                                 portage.writemsg(
5301                                         "!!! This binary package cannot be installed: '%s'\n" % \
5302                                         mykey, noiselevel=-1)
5303                         elif mytype == "ebuild":
5304                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5305                                 myebuild, mylocation = portdb.findname2(mykey)
5306                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5307                                         "'%s'\n" % myebuild, noiselevel=-1)
5308                         portage.writemsg("!!! Please notify the package maintainer " + \
5309                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5310                         return 0
5311                 return 1
5312
5313         def _priority(self, **kwargs):
5314                 if "remove" in self.myparams:
5315                         priority_constructor = UnmergeDepPriority
5316                 else:
5317                         priority_constructor = DepPriority
5318                 return priority_constructor(**kwargs)
5319
5320         def _dep_expand(self, root_config, atom_without_category):
5321                 """
5322                 @param root_config: a root config instance
5323                 @type root_config: RootConfig
5324                 @param atom_without_category: an atom without a category component
5325                 @type atom_without_category: String
5326                 @rtype: list
5327                 @returns: a list of atoms containing categories (possibly empty)
5328                 """
5329                 null_cp = portage.dep_getkey(insert_category_into_atom(
5330                         atom_without_category, "null"))
5331                 cat, atom_pn = portage.catsplit(null_cp)
5332
5333                 dbs = self._filtered_trees[root_config.root]["dbs"]
5334                 categories = set()
5335                 for db, pkg_type, built, installed, db_keys in dbs:
5336                         for cat in db.categories:
5337                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5338                                         categories.add(cat)
5339
5340                 deps = []
5341                 for cat in categories:
5342                         deps.append(insert_category_into_atom(
5343                                 atom_without_category, cat))
5344                 return deps
5345
5346         def _have_new_virt(self, root, atom_cp):
5347                 ret = False
5348                 for db, pkg_type, built, installed, db_keys in \
5349                         self._filtered_trees[root]["dbs"]:
5350                         if db.cp_list(atom_cp):
5351                                 ret = True
5352                                 break
5353                 return ret
5354
5355         def _iter_atoms_for_pkg(self, pkg):
5356                 # TODO: add multiple $ROOT support
5357                 if pkg.root != self.target_root:
5358                         return
5359                 atom_arg_map = self._atom_arg_map
5360                 root_config = self.roots[pkg.root]
5361                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5362                         atom_cp = portage.dep_getkey(atom)
5363                         if atom_cp != pkg.cp and \
5364                                 self._have_new_virt(pkg.root, atom_cp):
5365                                 continue
5366                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5367                         visible_pkgs.reverse() # descending order
5368                         higher_slot = None
5369                         for visible_pkg in visible_pkgs:
5370                                 if visible_pkg.cp != atom_cp:
5371                                         continue
5372                                 if pkg >= visible_pkg:
5373                                         # This is descending order, and we're not
5374                                         # interested in any versions <= pkg given.
5375                                         break
5376                                 if pkg.slot_atom != visible_pkg.slot_atom:
5377                                         higher_slot = visible_pkg
5378                                         break
5379                         if higher_slot is not None:
5380                                 continue
5381                         for arg in atom_arg_map[(atom, pkg.root)]:
5382                                 if isinstance(arg, PackageArg) and \
5383                                         arg.package != pkg:
5384                                         continue
5385                                 yield arg, atom
5386
5387         def select_files(self, myfiles):
5388                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5389                 appropriate depgraph and return a favorite list."""
5390                 debug = "--debug" in self.myopts
5391                 root_config = self.roots[self.target_root]
5392                 sets = root_config.sets
5393                 getSetAtoms = root_config.setconfig.getSetAtoms
5394                 myfavorites=[]
5395                 myroot = self.target_root
5396                 dbs = self._filtered_trees[myroot]["dbs"]
5397                 vardb = self.trees[myroot]["vartree"].dbapi
5398                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5399                 portdb = self.trees[myroot]["porttree"].dbapi
5400                 bindb = self.trees[myroot]["bintree"].dbapi
5401                 pkgsettings = self.pkgsettings[myroot]
5402                 args = []
5403                 onlydeps = "--onlydeps" in self.myopts
5404                 lookup_owners = []
5405                 for x in myfiles:
5406                         ext = os.path.splitext(x)[1]
5407                         if ext==".tbz2":
5408                                 if not os.path.exists(x):
5409                                         if os.path.exists(
5410                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5411                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5412                                         elif os.path.exists(
5413                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5414                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5415                                         else:
5416                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5417                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5418                                                 return 0, myfavorites
5419                                 mytbz2=portage.xpak.tbz2(x)
5420                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5421                                 if os.path.realpath(x) != \
5422                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5423                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5424                                         return 0, myfavorites
5425                                 db_keys = list(bindb._aux_cache_keys)
5426                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5427                                 pkg = Package(type_name="binary", root_config=root_config,
5428                                         cpv=mykey, built=True, metadata=metadata,
5429                                         onlydeps=onlydeps)
5430                                 self._pkg_cache[pkg] = pkg
5431                                 args.append(PackageArg(arg=x, package=pkg,
5432                                         root_config=root_config))
5433                         elif ext==".ebuild":
5434                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5435                                 pkgdir = os.path.dirname(ebuild_path)
5436                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5437                                 cp = pkgdir[len(tree_root)+1:]
5438                                 e = portage.exception.PackageNotFound(
5439                                         ("%s is not in a valid portage tree " + \
5440                                         "hierarchy or does not exist") % x)
5441                                 if not portage.isvalidatom(cp):
5442                                         raise e
5443                                 cat = portage.catsplit(cp)[0]
5444                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5445                                 if not portage.isvalidatom("="+mykey):
5446                                         raise e
5447                                 ebuild_path = portdb.findname(mykey)
5448                                 if ebuild_path:
5449                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5450                                                 cp, os.path.basename(ebuild_path)):
5451                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5452                                                 return 0, myfavorites
5453                                         if mykey not in portdb.xmatch(
5454                                                 "match-visible", portage.dep_getkey(mykey)):
5455                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5456                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5457                                                 print colorize("BAD", "*** page for details.")
5458                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5459                                                         "Continuing...")
5460                                 else:
5461                                         raise portage.exception.PackageNotFound(
5462                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5463                                 db_keys = list(portdb._aux_cache_keys)
5464                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5465                                 pkg = Package(type_name="ebuild", root_config=root_config,
5466                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5467                                 pkgsettings.setcpv(pkg)
5468                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5469                                 self._pkg_cache[pkg] = pkg
5470                                 args.append(PackageArg(arg=x, package=pkg,
5471                                         root_config=root_config))
5472                         elif x.startswith(os.path.sep):
5473                                 if not x.startswith(myroot):
5474                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5475                                                 " $ROOT.\n") % x, noiselevel=-1)
5476                                         return 0, []
5477                                 # Queue these up since it's most efficient to handle
5478                                 # multiple files in a single iter_owners() call.
5479                                 lookup_owners.append(x)
5480                         else:
5481                                 if x in ("system", "world"):
5482                                         x = SETPREFIX + x
5483                                 if x.startswith(SETPREFIX):
5484                                         s = x[len(SETPREFIX):]
5485                                         if s not in sets:
5486                                                 raise portage.exception.PackageSetNotFound(s)
5487                                         if s in self._sets:
5488                                                 continue
5489                                         # Recursively expand sets so that containment tests in
5490                                         # self._get_parent_sets() properly match atoms in nested
5491                                         # sets (like if world contains system).
5492                                         expanded_set = InternalPackageSet(
5493                                                 initial_atoms=getSetAtoms(s))
5494                                         self._sets[s] = expanded_set
5495                                         args.append(SetArg(arg=x, set=expanded_set,
5496                                                 root_config=root_config))
5497                                         continue
5498                                 if not is_valid_package_atom(x):
5499                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5500                                                 noiselevel=-1)
5501                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5502                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5503                                         return (0,[])
5504                                 # Don't expand categories or old-style virtuals here unless
5505                                 # necessary. Expansion of old-style virtuals here causes at
5506                                 # least the following problems:
5507                                 #   1) It's more difficult to determine which set(s) an atom
5508                                 #      came from, if any.
5509                                 #   2) It takes away freedom from the resolver to choose other
5510                                 #      possible expansions when necessary.
5511                                 if "/" in x:
5512                                         args.append(AtomArg(arg=x, atom=x,
5513                                                 root_config=root_config))
5514                                         continue
5515                                 expanded_atoms = self._dep_expand(root_config, x)
5516                                 installed_cp_set = set()
5517                                 for atom in expanded_atoms:
5518                                         atom_cp = portage.dep_getkey(atom)
5519                                         if vardb.cp_list(atom_cp):
5520                                                 installed_cp_set.add(atom_cp)
5521                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5522                                         installed_cp = iter(installed_cp_set).next()
5523                                         expanded_atoms = [atom for atom in expanded_atoms \
5524                                                 if portage.dep_getkey(atom) == installed_cp]
5525
5526                                 if len(expanded_atoms) > 1:
5527                                         print
5528                                         print
5529                                         ambiguous_package_name(x, expanded_atoms, root_config,
5530                                                 self.spinner, self.myopts)
5531                                         return False, myfavorites
5532                                 if expanded_atoms:
5533                                         atom = expanded_atoms[0]
5534                                 else:
5535                                         null_atom = insert_category_into_atom(x, "null")
5536                                         null_cp = portage.dep_getkey(null_atom)
5537                                         cat, atom_pn = portage.catsplit(null_cp)
5538                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5539                                         if virts_p:
5540                                                 # Allow the depgraph to choose which virtual.
5541                                                 atom = insert_category_into_atom(x, "virtual")
5542                                         else:
5543                                                 atom = insert_category_into_atom(x, "null")
5544
5545                                 args.append(AtomArg(arg=x, atom=atom,
5546                                         root_config=root_config))
5547
5548                 if lookup_owners:
5549                         relative_paths = []
5550                         search_for_multiple = False
5551                         if len(lookup_owners) > 1:
5552                                 search_for_multiple = True
5553
5554                         for x in lookup_owners:
5555                                 if not search_for_multiple and os.path.isdir(x):
5556                                         search_for_multiple = True
5557                                 relative_paths.append(x[len(myroot):])
5558
5559                         owners = set()
5560                         for pkg, relative_path in \
5561                                 real_vardb._owners.iter_owners(relative_paths):
5562                                 owners.add(pkg.mycpv)
5563                                 if not search_for_multiple:
5564                                         break
5565
5566                         if not owners:
5567                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5568                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5569                                 return 0, []
5570
5571                         for cpv in owners:
5572                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5573                                 if not slot:
5574                                         # portage now masks packages with missing slot, but it's
5575                                         # possible that one was installed by an older version
5576                                         atom = portage.cpv_getkey(cpv)
5577                                 else:
5578                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5579                                 args.append(AtomArg(arg=atom, atom=atom,
5580                                         root_config=root_config))
5581
5582                 if "--update" in self.myopts:
5583                         # In some cases, the greedy slots behavior can pull in a slot that
5584                         # the user would want to uninstall due to it being blocked by a
5585                         # newer version in a different slot. Therefore, it's necessary to
5586                         # detect and discard any that should be uninstalled. Each time
5587                         # that arguments are updated, package selections are repeated in
5588                         # order to ensure consistency with the current arguments:
5589                         #
5590                         #  1) Initialize args
5591                         #  2) Select packages and generate initial greedy atoms
5592                         #  3) Update args with greedy atoms
5593                         #  4) Select packages and generate greedy atoms again, while
5594                         #     accounting for any blockers between selected packages
5595                         #  5) Update args with revised greedy atoms
5596
5597                         self._set_args(args)
5598                         greedy_args = []
5599                         for arg in args:
5600                                 greedy_args.append(arg)
5601                                 if not isinstance(arg, AtomArg):
5602                                         continue
5603                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5604                                         greedy_args.append(
5605                                                 AtomArg(arg=arg.arg, atom=atom,
5606                                                         root_config=arg.root_config))
5607
5608                         self._set_args(greedy_args)
5609                         del greedy_args
5610
5611                         # Revise greedy atoms, accounting for any blockers
5612                         # between selected packages.
5613                         revised_greedy_args = []
5614                         for arg in args:
5615                                 revised_greedy_args.append(arg)
5616                                 if not isinstance(arg, AtomArg):
5617                                         continue
5618                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5619                                         blocker_lookahead=True):
5620                                         revised_greedy_args.append(
5621                                                 AtomArg(arg=arg.arg, atom=atom,
5622                                                         root_config=arg.root_config))
5623                         args = revised_greedy_args
5624                         del revised_greedy_args
5625
5626                 self._set_args(args)
5627
5628                 myfavorites = set(myfavorites)
5629                 for arg in args:
5630                         if isinstance(arg, (AtomArg, PackageArg)):
5631                                 myfavorites.add(arg.atom)
5632                         elif isinstance(arg, SetArg):
5633                                 myfavorites.add(arg.arg)
5634                 myfavorites = list(myfavorites)
5635
5636                 pprovideddict = pkgsettings.pprovideddict
5637                 if debug:
5638                         portage.writemsg("\n", noiselevel=-1)
5639                 # Order needs to be preserved since a feature of --nodeps
5640                 # is to allow the user to force a specific merge order.
5641                 args.reverse()
5642                 while args:
5643                         arg = args.pop()
5644                         for atom in arg.set:
5645                                 self.spinner.update()
5646                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5647                                         root=myroot, parent=arg)
5648                                 atom_cp = portage.dep_getkey(atom)
5649                                 try:
5650                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5651                                         if pprovided and portage.match_from_list(atom, pprovided):
5652                                                 # A provided package has been specified on the command line.
5653                                                 self._pprovided_args.append((arg, atom))
5654                                                 continue
5655                                         if isinstance(arg, PackageArg):
5656                                                 if not self._add_pkg(arg.package, dep) or \
5657                                                         not self._create_graph():
5658                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5659                                                                 "dependencies for %s\n") % arg.arg)
5660                                                         return 0, myfavorites
5661                                                 continue
5662                                         if debug:
5663                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5664                                                         (arg, atom), noiselevel=-1)
5665                                         pkg, existing_node = self._select_package(
5666                                                 myroot, atom, onlydeps=onlydeps)
5667                                         if not pkg:
5668                                                 if not (isinstance(arg, SetArg) and \
5669                                                         arg.name in ("system", "world")):
5670                                                         self._unsatisfied_deps_for_display.append(
5671                                                                 ((myroot, atom), {}))
5672                                                         return 0, myfavorites
5673                                                 self._missing_args.append((arg, atom))
5674                                                 continue
5675                                         if atom_cp != pkg.cp:
5676                                                 # For old-style virtuals, we need to repeat the
5677                                                 # package.provided check against the selected package.
5678                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5679                                                 pprovided = pprovideddict.get(pkg.cp)
5680                                                 if pprovided and \
5681                                                         portage.match_from_list(expanded_atom, pprovided):
5682                                                         # A provided package has been
5683                                                         # specified on the command line.
5684                                                         self._pprovided_args.append((arg, atom))
5685                                                         continue
5686                                         if pkg.installed and "selective" not in self.myparams:
5687                                                 self._unsatisfied_deps_for_display.append(
5688                                                         ((myroot, atom), {}))
5689                                                 # Previous behavior was to bail out in this case, but
5690                                                 # since the dep is satisfied by the installed package,
5691                                                 # it's more friendly to continue building the graph
5692                                                 # and just show a warning message. Therefore, only bail
5693                                                 # out here if the atom is not from either the system or
5694                                                 # world set.
5695                                                 if not (isinstance(arg, SetArg) and \
5696                                                         arg.name in ("system", "world")):
5697                                                         return 0, myfavorites
5698
5699                                         # Add the selected package to the graph as soon as possible
5700                                         # so that later dep_check() calls can use it as feedback
5701                                         # for making more consistent atom selections.
5702                                         if not self._add_pkg(pkg, dep):
5703                                                 if isinstance(arg, SetArg):
5704                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5705                                                                 "dependencies for %s from %s\n") % \
5706                                                                 (atom, arg.arg))
5707                                                 else:
5708                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5709                                                                 "dependencies for %s\n") % atom)
5710                                                 return 0, myfavorites
5711
5712                                 except portage.exception.MissingSignature, e:
5713                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5714                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5715                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5716                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5717                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5718                                         return 0, myfavorites
5719                                 except portage.exception.InvalidSignature, e:
5720                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5721                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5722                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5723                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5724                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5725                                         return 0, myfavorites
5726                                 except SystemExit, e:
5727                                         raise # Needed else can't exit
5728                                 except Exception, e:
5729                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5730                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5731                                         raise
5732
5733                 # Now that the root packages have been added to the graph,
5734                 # process the dependencies.
5735                 if not self._create_graph():
5736                         return 0, myfavorites
5737
5738                 missing=0
5739                 if "--usepkgonly" in self.myopts:
5740                         for xs in self.digraph.all_nodes():
5741                                 if not isinstance(xs, Package):
5742                                         continue
5743                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5744                                         if missing == 0:
5745                                                 print
5746                                         missing += 1
5747                                         print "Missing binary for:",xs[2]
5748
5749                 try:
5750                         self.altlist()
5751                 except self._unknown_internal_error:
5752                         return False, myfavorites
5753
5754                 # We're true here unless we are missing binaries.
5755                 return (not missing,myfavorites)
5756
5757         def _set_args(self, args):
5758                 """
5759                 Create the "args" package set from atoms and packages given as
5760                 arguments. This method can be called multiple times if necessary.
5761                 The package selection cache is automatically invalidated, since
5762                 arguments influence package selections.
5763                 """
5764                 args_set = self._sets["args"]
5765                 args_set.clear()
5766                 for arg in args:
5767                         if not isinstance(arg, (AtomArg, PackageArg)):
5768                                 continue
5769                         atom = arg.atom
5770                         if atom in args_set:
5771                                 continue
5772                         args_set.add(atom)
5773
5774                 self._set_atoms.clear()
5775                 self._set_atoms.update(chain(*self._sets.itervalues()))
5776                 atom_arg_map = self._atom_arg_map
5777                 atom_arg_map.clear()
5778                 for arg in args:
5779                         for atom in arg.set:
5780                                 atom_key = (atom, arg.root_config.root)
5781                                 refs = atom_arg_map.get(atom_key)
5782                                 if refs is None:
5783                                         refs = []
5784                                         atom_arg_map[atom_key] = refs
5785                                         if arg not in refs:
5786                                                 refs.append(arg)
5787
5788                 # Invalidate the package selection cache, since
5789                 # arguments influence package selections.
5790                 self._highest_pkg_cache.clear()
5791                 for trees in self._filtered_trees.itervalues():
5792                         trees["porttree"].dbapi._clear_cache()
5793
5794         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5795                 """
5796                 Return a list of slot atoms corresponding to installed slots that
5797                 differ from the slot of the highest visible match. When
5798                 blocker_lookahead is True, slot atoms that would trigger a blocker
5799                 conflict are automatically discarded, potentially allowing automatic
5800                 uninstallation of older slots when appropriate.
5801                 """
5802                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5803                 if highest_pkg is None:
5804                         return []
5805                 vardb = root_config.trees["vartree"].dbapi
5806                 slots = set()
5807                 for cpv in vardb.match(atom):
5808                         # don't mix new virtuals with old virtuals
5809                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5810                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5811
5812                 slots.add(highest_pkg.metadata["SLOT"])
5813                 if len(slots) == 1:
5814                         return []
5815                 greedy_pkgs = []
5816                 slots.remove(highest_pkg.metadata["SLOT"])
5817                 while slots:
5818                         slot = slots.pop()
5819                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5820                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5821                         if pkg is not None and \
5822                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5823                                 greedy_pkgs.append(pkg)
5824                 if not greedy_pkgs:
5825                         return []
5826                 if not blocker_lookahead:
5827                         return [pkg.slot_atom for pkg in greedy_pkgs]
5828
5829                 blockers = {}
5830                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5831                 for pkg in greedy_pkgs + [highest_pkg]:
5832                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5833                         try:
5834                                 atoms = self._select_atoms(
5835                                         pkg.root, dep_str, pkg.use.enabled,
5836                                         parent=pkg, strict=True)
5837                         except portage.exception.InvalidDependString:
5838                                 continue
5839                         blocker_atoms = (x for x in atoms if x.blocker)
5840                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5841
5842                 if highest_pkg not in blockers:
5843                         return []
5844
5845                 # filter packages with invalid deps
5846                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5847
5848                 # filter packages that conflict with highest_pkg
5849                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5850                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5851                         blockers[pkg].findAtomForPackage(highest_pkg))]
5852
5853                 if not greedy_pkgs:
5854                         return []
5855
5856                 # If two packages conflict, discard the lower version.
5857                 discard_pkgs = set()
5858                 greedy_pkgs.sort(reverse=True)
5859                 for i in xrange(len(greedy_pkgs) - 1):
5860                         pkg1 = greedy_pkgs[i]
5861                         if pkg1 in discard_pkgs:
5862                                 continue
5863                         for j in xrange(i + 1, len(greedy_pkgs)):
5864                                 pkg2 = greedy_pkgs[j]
5865                                 if pkg2 in discard_pkgs:
5866                                         continue
5867                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5868                                         blockers[pkg2].findAtomForPackage(pkg1):
5869                                         # pkg1 > pkg2
5870                                         discard_pkgs.add(pkg2)
5871
5872                 return [pkg.slot_atom for pkg in greedy_pkgs \
5873                         if pkg not in discard_pkgs]
5874
5875         def _select_atoms_from_graph(self, *pargs, **kwargs):
5876                 """
5877                 Prefer atoms matching packages that have already been
5878                 added to the graph or those that are installed and have
5879                 not been scheduled for replacement.
5880                 """
5881                 kwargs["trees"] = self._graph_trees
5882                 return self._select_atoms_highest_available(*pargs, **kwargs)
5883
5884         def _select_atoms_highest_available(self, root, depstring,
5885                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5886                 """This will raise InvalidDependString if necessary. If trees is
5887                 None then self._filtered_trees is used."""
5888                 pkgsettings = self.pkgsettings[root]
5889                 if trees is None:
5890                         trees = self._filtered_trees
5891                 if not getattr(priority, "buildtime", False):
5892                         # The parent should only be passed to dep_check() for buildtime
5893                         # dependencies since that's the only case when it's appropriate
5894                         # to trigger the circular dependency avoidance code which uses it.
5895                         # It's important not to trigger the same circular dependency
5896                         # avoidance code for runtime dependencies since it's not needed
5897                         # and it can promote an incorrect package choice.
5898                         parent = None
5899                 if True:
5900                         try:
5901                                 if parent is not None:
5902                                         trees[root]["parent"] = parent
5903                                 if not strict:
5904                                         portage.dep._dep_check_strict = False
5905                                 mycheck = portage.dep_check(depstring, None,
5906                                         pkgsettings, myuse=myuse,
5907                                         myroot=root, trees=trees)
5908                         finally:
5909                                 if parent is not None:
5910                                         trees[root].pop("parent")
5911                                 portage.dep._dep_check_strict = True
5912                         if not mycheck[0]:
5913                                 raise portage.exception.InvalidDependString(mycheck[1])
5914                         selected_atoms = mycheck[1]
5915                 return selected_atoms
5916
5917         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5918                 atom = portage.dep.Atom(atom)
5919                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5920                 atom_without_use = atom
5921                 if atom.use:
5922                         atom_without_use = portage.dep.remove_slot(atom)
5923                         if atom.slot:
5924                                 atom_without_use += ":" + atom.slot
5925                         atom_without_use = portage.dep.Atom(atom_without_use)
5926                 xinfo = '"%s"' % atom
5927                 if arg:
5928                         xinfo='"%s"' % arg
5929                 # Discard null/ from failed cpv_expand category expansion.
5930                 xinfo = xinfo.replace("null/", "")
5931                 masked_packages = []
5932                 missing_use = []
5933                 missing_licenses = []
5934                 have_eapi_mask = False
5935                 pkgsettings = self.pkgsettings[root]
5936                 implicit_iuse = pkgsettings._get_implicit_iuse()
5937                 root_config = self.roots[root]
5938                 portdb = self.roots[root].trees["porttree"].dbapi
5939                 dbs = self._filtered_trees[root]["dbs"]
5940                 for db, pkg_type, built, installed, db_keys in dbs:
5941                         if installed:
5942                                 continue
5943                         match = db.match
5944                         if hasattr(db, "xmatch"):
5945                                 cpv_list = db.xmatch("match-all", atom_without_use)
5946                         else:
5947                                 cpv_list = db.match(atom_without_use)
5948                         # descending order
5949                         cpv_list.reverse()
5950                         for cpv in cpv_list:
5951                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5952                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5953                                 if metadata is not None:
5954                                         pkg = Package(built=built, cpv=cpv,
5955                                                 installed=installed, metadata=metadata,
5956                                                 root_config=root_config)
5957                                         if pkg.cp != atom.cp:
5958                                                 # A cpv can be returned from dbapi.match() as an
5959                                                 # old-style virtual match even in cases when the
5960                                                 # package does not actually PROVIDE the virtual.
5961                                                 # Filter out any such false matches here.
5962                                                 if not atom_set.findAtomForPackage(pkg):
5963                                                         continue
5964                                         if atom.use and not mreasons:
5965                                                 missing_use.append(pkg)
5966                                                 continue
5967                                 masked_packages.append(
5968                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5969
5970                 missing_use_reasons = []
5971                 missing_iuse_reasons = []
5972                 for pkg in missing_use:
5973                         use = pkg.use.enabled
5974                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5975                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5976                         missing_iuse = []
5977                         for x in atom.use.required:
5978                                 if iuse_re.match(x) is None:
5979                                         missing_iuse.append(x)
5980                         mreasons = []
5981                         if missing_iuse:
5982                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5983                                 missing_iuse_reasons.append((pkg, mreasons))
5984                         else:
5985                                 need_enable = sorted(atom.use.enabled.difference(use))
5986                                 need_disable = sorted(atom.use.disabled.intersection(use))
5987                                 if need_enable or need_disable:
5988                                         changes = []
5989                                         changes.extend(colorize("red", "+" + x) \
5990                                                 for x in need_enable)
5991                                         changes.extend(colorize("blue", "-" + x) \
5992                                                 for x in need_disable)
5993                                         mreasons.append("Change USE: %s" % " ".join(changes))
5994                                         missing_use_reasons.append((pkg, mreasons))
5995
5996                 if missing_iuse_reasons and not missing_use_reasons:
5997                         missing_use_reasons = missing_iuse_reasons
5998                 elif missing_use_reasons:
5999                         # Only show the latest version.
6000                         del missing_use_reasons[1:]
6001
6002                 if missing_use_reasons:
6003                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6004                         print "!!! One of the following packages is required to complete your request:"
6005                         for pkg, mreasons in missing_use_reasons:
6006                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6007
6008                 elif masked_packages:
6009                         print "\n!!! " + \
6010                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6011                                 colorize("INFORM", xinfo) + \
6012                                 colorize("BAD", " have been masked.")
6013                         print "!!! One of the following masked packages is required to complete your request:"
6014                         have_eapi_mask = show_masked_packages(masked_packages)
6015                         if have_eapi_mask:
6016                                 print
6017                                 msg = ("The current version of portage supports " + \
6018                                         "EAPI '%s'. You must upgrade to a newer version" + \
6019                                         " of portage before EAPI masked packages can" + \
6020                                         " be installed.") % portage.const.EAPI
6021                                 from textwrap import wrap
6022                                 for line in wrap(msg, 75):
6023                                         print line
6024                         print
6025                         show_mask_docs()
6026                 else:
6027                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6028
6029                 # Show parent nodes and the argument that pulled them in.
6030                 traversed_nodes = set()
6031                 node = myparent
6032                 msg = []
6033                 while node is not None:
6034                         traversed_nodes.add(node)
6035                         msg.append('(dependency required by "%s" [%s])' % \
6036                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6037                         # When traversing to parents, prefer arguments over packages
6038                         # since arguments are root nodes. Never traverse the same
6039                         # package twice, in order to prevent an infinite loop.
6040                         selected_parent = None
6041                         for parent in self.digraph.parent_nodes(node):
6042                                 if isinstance(parent, DependencyArg):
6043                                         msg.append('(dependency required by "%s" [argument])' % \
6044                                                 (colorize('INFORM', str(parent))))
6045                                         selected_parent = None
6046                                         break
6047                                 if parent not in traversed_nodes:
6048                                         selected_parent = parent
6049                         node = selected_parent
6050                 for line in msg:
6051                         print line
6052
6053                 print
6054
6055         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6056                 cache_key = (root, atom, onlydeps)
6057                 ret = self._highest_pkg_cache.get(cache_key)
6058                 if ret is not None:
6059                         pkg, existing = ret
6060                         if pkg and not existing:
6061                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6062                                 if existing and existing == pkg:
6063                                         # Update the cache to reflect that the
6064                                         # package has been added to the graph.
6065                                         ret = pkg, pkg
6066                                         self._highest_pkg_cache[cache_key] = ret
6067                         return ret
6068                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6069                 self._highest_pkg_cache[cache_key] = ret
6070                 pkg, existing = ret
6071                 if pkg is not None:
6072                         settings = pkg.root_config.settings
6073                         if visible(settings, pkg) and not (pkg.installed and \
6074                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6075                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6076                 return ret
6077
6078         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6079                 root_config = self.roots[root]
6080                 pkgsettings = self.pkgsettings[root]
6081                 dbs = self._filtered_trees[root]["dbs"]
6082                 vardb = self.roots[root].trees["vartree"].dbapi
6083                 portdb = self.roots[root].trees["porttree"].dbapi
6084                 # List of acceptable packages, ordered by type preference.
6085                 matched_packages = []
6086                 highest_version = None
6087                 if not isinstance(atom, portage.dep.Atom):
6088                         atom = portage.dep.Atom(atom)
6089                 atom_cp = atom.cp
6090                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6091                 existing_node = None
6092                 myeb = None
6093                 usepkgonly = "--usepkgonly" in self.myopts
6094                 empty = "empty" in self.myparams
6095                 selective = "selective" in self.myparams
6096                 reinstall = False
6097                 noreplace = "--noreplace" in self.myopts
6098                 # Behavior of the "selective" parameter depends on
6099                 # whether or not a package matches an argument atom.
6100                 # If an installed package provides an old-style
6101                 # virtual that is no longer provided by an available
6102                 # package, the installed package may match an argument
6103                 # atom even though none of the available packages do.
6104                 # Therefore, "selective" logic does not consider
6105                 # whether or not an installed package matches an
6106                 # argument atom. It only considers whether or not
6107                 # available packages match argument atoms, which is
6108                 # represented by the found_available_arg flag.
6109                 found_available_arg = False
6110                 for find_existing_node in True, False:
6111                         if existing_node:
6112                                 break
6113                         for db, pkg_type, built, installed, db_keys in dbs:
6114                                 if existing_node:
6115                                         break
6116                                 if installed and not find_existing_node:
6117                                         want_reinstall = reinstall or empty or \
6118                                                 (found_available_arg and not selective)
6119                                         if want_reinstall and matched_packages:
6120                                                 continue
6121                                 if hasattr(db, "xmatch"):
6122                                         cpv_list = db.xmatch("match-all", atom)
6123                                 else:
6124                                         cpv_list = db.match(atom)
6125
6126                                 # USE=multislot can make an installed package appear as if
6127                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6128                                 # won't do any good as long as USE=multislot is enabled since
6129                                 # the newly built package still won't have the expected slot.
6130                                 # Therefore, assume that such SLOT dependencies are already
6131                                 # satisfied rather than forcing a rebuild.
6132                                 if installed and not cpv_list and atom.slot:
6133                                         for cpv in db.match(atom.cp):
6134                                                 slot_available = False
6135                                                 for other_db, other_type, other_built, \
6136                                                         other_installed, other_keys in dbs:
6137                                                         try:
6138                                                                 if atom.slot == \
6139                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6140                                                                         slot_available = True
6141                                                                         break
6142                                                         except KeyError:
6143                                                                 pass
6144                                                 if not slot_available:
6145                                                         continue
6146                                                 inst_pkg = self._pkg(cpv, "installed",
6147                                                         root_config, installed=installed)
6148                                                 # Remove the slot from the atom and verify that
6149                                                 # the package matches the resulting atom.
6150                                                 atom_without_slot = portage.dep.remove_slot(atom)
6151                                                 if atom.use:
6152                                                         atom_without_slot += str(atom.use)
6153                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6154                                                 if portage.match_from_list(
6155                                                         atom_without_slot, [inst_pkg]):
6156                                                         cpv_list = [inst_pkg.cpv]
6157                                                 break
6158
6159                                 if not cpv_list:
6160                                         continue
6161                                 pkg_status = "merge"
6162                                 if installed or onlydeps:
6163                                         pkg_status = "nomerge"
6164                                 # descending order
6165                                 cpv_list.reverse()
6166                                 for cpv in cpv_list:
6167                                         # Make --noreplace take precedence over --newuse.
6168                                         if not installed and noreplace and \
6169                                                 cpv in vardb.match(atom):
6170                                                 # If the installed version is masked, it may
6171                                                 # be necessary to look at lower versions,
6172                                                 # in case there is a visible downgrade.
6173                                                 continue
6174                                         reinstall_for_flags = None
6175                                         cache_key = (pkg_type, root, cpv, pkg_status)
6176                                         calculated_use = True
6177                                         pkg = self._pkg_cache.get(cache_key)
6178                                         if pkg is None:
6179                                                 calculated_use = False
6180                                                 try:
6181                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6182                                                 except KeyError:
6183                                                         continue
6184                                                 pkg = Package(built=built, cpv=cpv,
6185                                                         installed=installed, metadata=metadata,
6186                                                         onlydeps=onlydeps, root_config=root_config,
6187                                                         type_name=pkg_type)
6188                                                 metadata = pkg.metadata
6189                                                 if not built and ("?" in metadata["LICENSE"] or \
6190                                                         "?" in metadata["PROVIDE"]):
6191                                                         # This is avoided whenever possible because
6192                                                         # it's expensive. It only needs to be done here
6193                                                         # if it has an effect on visibility.
6194                                                         pkgsettings.setcpv(pkg)
6195                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6196                                                         calculated_use = True
6197                                                 self._pkg_cache[pkg] = pkg
6198
6199                                         if not installed or (built and matched_packages):
6200                                                 # Only enforce visibility on installed packages
6201                                                 # if there is at least one other visible package
6202                                                 # available. By filtering installed masked packages
6203                                                 # here, packages that have been masked since they
6204                                                 # were installed can be automatically downgraded
6205                                                 # to an unmasked version.
6206                                                 try:
6207                                                         if not visible(pkgsettings, pkg):
6208                                                                 continue
6209                                                 except portage.exception.InvalidDependString:
6210                                                         if not installed:
6211                                                                 continue
6212
6213                                                 # Enable upgrade or downgrade to a version
6214                                                 # with visible KEYWORDS when the installed
6215                                                 # version is masked by KEYWORDS, but never
6216                                                 # reinstall the same exact version only due
6217                                                 # to a KEYWORDS mask.
6218                                                 if built and matched_packages:
6219
6220                                                         different_version = None
6221                                                         for avail_pkg in matched_packages:
6222                                                                 if not portage.dep.cpvequal(
6223                                                                         pkg.cpv, avail_pkg.cpv):
6224                                                                         different_version = avail_pkg
6225                                                                         break
6226                                                         if different_version is not None:
6227
6228                                                                 if installed and \
6229                                                                         pkgsettings._getMissingKeywords(
6230                                                                         pkg.cpv, pkg.metadata):
6231                                                                         continue
6232
6233                                                                 # If the ebuild no longer exists or it's
6234                                                                 # keywords have been dropped, reject built
6235                                                                 # instances (installed or binary).
6236                                                                 # If --usepkgonly is enabled, assume that
6237                                                                 # the ebuild status should be ignored.
6238                                                                 if not usepkgonly:
6239                                                                         try:
6240                                                                                 pkg_eb = self._pkg(
6241                                                                                         pkg.cpv, "ebuild", root_config)
6242                                                                         except portage.exception.PackageNotFound:
6243                                                                                 continue
6244                                                                         else:
6245                                                                                 if not visible(pkgsettings, pkg_eb):
6246                                                                                         continue
6247
6248                                         if not pkg.built and not calculated_use:
6249                                                 # This is avoided whenever possible because
6250                                                 # it's expensive.
6251                                                 pkgsettings.setcpv(pkg)
6252                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6253
6254                                         if pkg.cp != atom.cp:
6255                                                 # A cpv can be returned from dbapi.match() as an
6256                                                 # old-style virtual match even in cases when the
6257                                                 # package does not actually PROVIDE the virtual.
6258                                                 # Filter out any such false matches here.
6259                                                 if not atom_set.findAtomForPackage(pkg):
6260                                                         continue
6261
6262                                         myarg = None
6263                                         if root == self.target_root:
6264                                                 try:
6265                                                         # Ebuild USE must have been calculated prior
6266                                                         # to this point, in case atoms have USE deps.
6267                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6268                                                 except StopIteration:
6269                                                         pass
6270                                                 except portage.exception.InvalidDependString:
6271                                                         if not installed:
6272                                                                 # masked by corruption
6273                                                                 continue
6274                                         if not installed and myarg:
6275                                                 found_available_arg = True
6276
6277                                         if atom.use and not pkg.built:
6278                                                 use = pkg.use.enabled
6279                                                 if atom.use.enabled.difference(use):
6280                                                         continue
6281                                                 if atom.use.disabled.intersection(use):
6282                                                         continue
6283                                         if pkg.cp == atom_cp:
6284                                                 if highest_version is None:
6285                                                         highest_version = pkg
6286                                                 elif pkg > highest_version:
6287                                                         highest_version = pkg
6288                                         # At this point, we've found the highest visible
6289                                         # match from the current repo. Any lower versions
6290                                         # from this repo are ignored, so this so the loop
6291                                         # will always end with a break statement below
6292                                         # this point.
6293                                         if find_existing_node:
6294                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6295                                                 if not e_pkg:
6296                                                         break
6297                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6298                                                         if highest_version and \
6299                                                                 e_pkg.cp == atom_cp and \
6300                                                                 e_pkg < highest_version and \
6301                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6302                                                                 # There is a higher version available in a
6303                                                                 # different slot, so this existing node is
6304                                                                 # irrelevant.
6305                                                                 pass
6306                                                         else:
6307                                                                 matched_packages.append(e_pkg)
6308                                                                 existing_node = e_pkg
6309                                                 break
6310                                         # Compare built package to current config and
6311                                         # reject the built package if necessary.
6312                                         if built and not installed and \
6313                                                 ("--newuse" in self.myopts or \
6314                                                 "--reinstall" in self.myopts):
6315                                                 iuses = pkg.iuse.all
6316                                                 old_use = pkg.use.enabled
6317                                                 if myeb:
6318                                                         pkgsettings.setcpv(myeb)
6319                                                 else:
6320                                                         pkgsettings.setcpv(pkg)
6321                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6322                                                 forced_flags = set()
6323                                                 forced_flags.update(pkgsettings.useforce)
6324                                                 forced_flags.update(pkgsettings.usemask)
6325                                                 cur_iuse = iuses
6326                                                 if myeb and not usepkgonly:
6327                                                         cur_iuse = myeb.iuse.all
6328                                                 if self._reinstall_for_flags(forced_flags,
6329                                                         old_use, iuses,
6330                                                         now_use, cur_iuse):
6331                                                         break
6332                                         # Compare current config to installed package
6333                                         # and do not reinstall if possible.
6334                                         if not installed and \
6335                                                 ("--newuse" in self.myopts or \
6336                                                 "--reinstall" in self.myopts) and \
6337                                                 cpv in vardb.match(atom):
6338                                                 pkgsettings.setcpv(pkg)
6339                                                 forced_flags = set()
6340                                                 forced_flags.update(pkgsettings.useforce)
6341                                                 forced_flags.update(pkgsettings.usemask)
6342                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6343                                                 old_iuse = set(filter_iuse_defaults(
6344                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6345                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6346                                                 cur_iuse = pkg.iuse.all
6347                                                 reinstall_for_flags = \
6348                                                         self._reinstall_for_flags(
6349                                                         forced_flags, old_use, old_iuse,
6350                                                         cur_use, cur_iuse)
6351                                                 if reinstall_for_flags:
6352                                                         reinstall = True
6353                                         if not built:
6354                                                 myeb = pkg
6355                                         matched_packages.append(pkg)
6356                                         if reinstall_for_flags:
6357                                                 self._reinstall_nodes[pkg] = \
6358                                                         reinstall_for_flags
6359                                         break
6360
6361                 if not matched_packages:
6362                         return None, None
6363
6364                 if "--debug" in self.myopts:
6365                         for pkg in matched_packages:
6366                                 portage.writemsg("%s %s\n" % \
6367                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6368
6369                 # Filter out any old-style virtual matches if they are
6370                 # mixed with new-style virtual matches.
6371                 cp = portage.dep_getkey(atom)
6372                 if len(matched_packages) > 1 and \
6373                         "virtual" == portage.catsplit(cp)[0]:
6374                         for pkg in matched_packages:
6375                                 if pkg.cp != cp:
6376                                         continue
6377                                 # Got a new-style virtual, so filter
6378                                 # out any old-style virtuals.
6379                                 matched_packages = [pkg for pkg in matched_packages \
6380                                         if pkg.cp == cp]
6381                                 break
6382
6383                 if len(matched_packages) > 1:
6384                         bestmatch = portage.best(
6385                                 [pkg.cpv for pkg in matched_packages])
6386                         matched_packages = [pkg for pkg in matched_packages \
6387                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6388
6389                 # ordered by type preference ("ebuild" type is the last resort)
6390                 return  matched_packages[-1], existing_node
6391
6392         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6393                 """
6394                 Select packages that have already been added to the graph or
6395                 those that are installed and have not been scheduled for
6396                 replacement.
6397                 """
6398                 graph_db = self._graph_trees[root]["porttree"].dbapi
6399                 matches = graph_db.match_pkgs(atom)
6400                 if not matches:
6401                         return None, None
6402                 pkg = matches[-1] # highest match
6403                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6404                 return pkg, in_graph
6405
6406         def _complete_graph(self):
6407                 """
6408                 Add any deep dependencies of required sets (args, system, world) that
6409                 have not been pulled into the graph yet. This ensures that the graph
6410                 is consistent such that initially satisfied deep dependencies are not
6411                 broken in the new graph. Initially unsatisfied dependencies are
6412                 irrelevant since we only want to avoid breaking dependencies that are
6413                 intially satisfied.
6414
6415                 Since this method can consume enough time to disturb users, it is
6416                 currently only enabled by the --complete-graph option.
6417                 """
6418                 if "--buildpkgonly" in self.myopts or \
6419                         "recurse" not in self.myparams:
6420                         return 1
6421
6422                 if "complete" not in self.myparams:
6423                         # Skip this to avoid consuming enough time to disturb users.
6424                         return 1
6425
6426                 # Put the depgraph into a mode that causes it to only
6427                 # select packages that have already been added to the
6428                 # graph or those that are installed and have not been
6429                 # scheduled for replacement. Also, toggle the "deep"
6430                 # parameter so that all dependencies are traversed and
6431                 # accounted for.
6432                 self._select_atoms = self._select_atoms_from_graph
6433                 self._select_package = self._select_pkg_from_graph
6434                 already_deep = "deep" in self.myparams
6435                 if not already_deep:
6436                         self.myparams.add("deep")
6437
6438                 for root in self.roots:
6439                         required_set_names = self._required_set_names.copy()
6440                         if root == self.target_root and \
6441                                 (already_deep or "empty" in self.myparams):
6442                                 required_set_names.difference_update(self._sets)
6443                         if not required_set_names and not self._ignored_deps:
6444                                 continue
6445                         root_config = self.roots[root]
6446                         setconfig = root_config.setconfig
6447                         args = []
6448                         # Reuse existing SetArg instances when available.
6449                         for arg in self.digraph.root_nodes():
6450                                 if not isinstance(arg, SetArg):
6451                                         continue
6452                                 if arg.root_config != root_config:
6453                                         continue
6454                                 if arg.name in required_set_names:
6455                                         args.append(arg)
6456                                         required_set_names.remove(arg.name)
6457                         # Create new SetArg instances only when necessary.
6458                         for s in required_set_names:
6459                                 expanded_set = InternalPackageSet(
6460                                         initial_atoms=setconfig.getSetAtoms(s))
6461                                 atom = SETPREFIX + s
6462                                 args.append(SetArg(arg=atom, set=expanded_set,
6463                                         root_config=root_config))
6464                         vardb = root_config.trees["vartree"].dbapi
6465                         for arg in args:
6466                                 for atom in arg.set:
6467                                         self._dep_stack.append(
6468                                                 Dependency(atom=atom, root=root, parent=arg))
6469                         if self._ignored_deps:
6470                                 self._dep_stack.extend(self._ignored_deps)
6471                                 self._ignored_deps = []
6472                         if not self._create_graph(allow_unsatisfied=True):
6473                                 return 0
6474                         # Check the unsatisfied deps to see if any initially satisfied deps
6475                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6476                         # deps are irrelevant since we only want to avoid breaking deps
6477                         # that are initially satisfied.
6478                         while self._unsatisfied_deps:
6479                                 dep = self._unsatisfied_deps.pop()
6480                                 matches = vardb.match_pkgs(dep.atom)
6481                                 if not matches:
6482                                         self._initially_unsatisfied_deps.append(dep)
6483                                         continue
6484                                 # An scheduled installation broke a deep dependency.
6485                                 # Add the installed package to the graph so that it
6486                                 # will be appropriately reported as a slot collision
6487                                 # (possibly solvable via backtracking).
6488                                 pkg = matches[-1] # highest match
6489                                 if not self._add_pkg(pkg, dep):
6490                                         return 0
6491                                 if not self._create_graph(allow_unsatisfied=True):
6492                                         return 0
6493                 return 1
6494
6495         def _pkg(self, cpv, type_name, root_config, installed=False):
6496                 """
6497                 Get a package instance from the cache, or create a new
6498                 one if necessary. Raises KeyError from aux_get if it
6499                 failures for some reason (package does not exist or is
6500                 corrupt).
6501                 """
6502                 operation = "merge"
6503                 if installed:
6504                         operation = "nomerge"
6505                 pkg = self._pkg_cache.get(
6506                         (type_name, root_config.root, cpv, operation))
6507                 if pkg is None:
6508                         tree_type = self.pkg_tree_map[type_name]
6509                         db = root_config.trees[tree_type].dbapi
6510                         db_keys = list(self._trees_orig[root_config.root][
6511                                 tree_type].dbapi._aux_cache_keys)
6512                         try:
6513                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6514                         except KeyError:
6515                                 raise portage.exception.PackageNotFound(cpv)
6516                         pkg = Package(cpv=cpv, metadata=metadata,
6517                                 root_config=root_config, installed=installed)
6518                         if type_name == "ebuild":
6519                                 settings = self.pkgsettings[root_config.root]
6520                                 settings.setcpv(pkg)
6521                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6522                         self._pkg_cache[pkg] = pkg
6523                 return pkg
6524
6525         def validate_blockers(self):
6526                 """Remove any blockers from the digraph that do not match any of the
6527                 packages within the graph.  If necessary, create hard deps to ensure
6528                 correct merge order such that mutually blocking packages are never
6529                 installed simultaneously."""
6530
6531                 if "--buildpkgonly" in self.myopts or \
6532                         "--nodeps" in self.myopts:
6533                         return True
6534
6535                 #if "deep" in self.myparams:
6536                 if True:
6537                         # Pull in blockers from all installed packages that haven't already
6538                         # been pulled into the depgraph.  This is not enabled by default
6539                         # due to the performance penalty that is incurred by all the
6540                         # additional dep_check calls that are required.
6541
6542                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6543                         for myroot in self.trees:
6544                                 vardb = self.trees[myroot]["vartree"].dbapi
6545                                 portdb = self.trees[myroot]["porttree"].dbapi
6546                                 pkgsettings = self.pkgsettings[myroot]
6547                                 final_db = self.mydbapi[myroot]
6548
6549                                 blocker_cache = BlockerCache(myroot, vardb)
6550                                 stale_cache = set(blocker_cache)
6551                                 for pkg in vardb:
6552                                         cpv = pkg.cpv
6553                                         stale_cache.discard(cpv)
6554                                         pkg_in_graph = self.digraph.contains(pkg)
6555
6556                                         # Check for masked installed packages. Only warn about
6557                                         # packages that are in the graph in order to avoid warning
6558                                         # about those that will be automatically uninstalled during
6559                                         # the merge process or by --depclean.
6560                                         if pkg in final_db:
6561                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6562                                                         self._masked_installed.add(pkg)
6563
6564                                         blocker_atoms = None
6565                                         blockers = None
6566                                         if pkg_in_graph:
6567                                                 blockers = []
6568                                                 try:
6569                                                         blockers.extend(
6570                                                                 self._blocker_parents.child_nodes(pkg))
6571                                                 except KeyError:
6572                                                         pass
6573                                                 try:
6574                                                         blockers.extend(
6575                                                                 self._irrelevant_blockers.child_nodes(pkg))
6576                                                 except KeyError:
6577                                                         pass
6578                                         if blockers is not None:
6579                                                 blockers = set(str(blocker.atom) \
6580                                                         for blocker in blockers)
6581
6582                                         # If this node has any blockers, create a "nomerge"
6583                                         # node for it so that they can be enforced.
6584                                         self.spinner.update()
6585                                         blocker_data = blocker_cache.get(cpv)
6586                                         if blocker_data is not None and \
6587                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6588                                                 blocker_data = None
6589
6590                                         # If blocker data from the graph is available, use
6591                                         # it to validate the cache and update the cache if
6592                                         # it seems invalid.
6593                                         if blocker_data is not None and \
6594                                                 blockers is not None:
6595                                                 if not blockers.symmetric_difference(
6596                                                         blocker_data.atoms):
6597                                                         continue
6598                                                 blocker_data = None
6599
6600                                         if blocker_data is None and \
6601                                                 blockers is not None:
6602                                                 # Re-use the blockers from the graph.
6603                                                 blocker_atoms = sorted(blockers)
6604                                                 counter = long(pkg.metadata["COUNTER"])
6605                                                 blocker_data = \
6606                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6607                                                 blocker_cache[pkg.cpv] = blocker_data
6608                                                 continue
6609
6610                                         if blocker_data:
6611                                                 blocker_atoms = blocker_data.atoms
6612                                         else:
6613                                                 # Use aux_get() to trigger FakeVartree global
6614                                                 # updates on *DEPEND when appropriate.
6615                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6616                                                 # It is crucial to pass in final_db here in order to
6617                                                 # optimize dep_check calls by eliminating atoms via
6618                                                 # dep_wordreduce and dep_eval calls.
6619                                                 try:
6620                                                         portage.dep._dep_check_strict = False
6621                                                         try:
6622                                                                 success, atoms = portage.dep_check(depstr,
6623                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6624                                                                         trees=self._graph_trees, myroot=myroot)
6625                                                         except Exception, e:
6626                                                                 if isinstance(e, SystemExit):
6627                                                                         raise
6628                                                                 # This is helpful, for example, if a ValueError
6629                                                                 # is thrown from cpv_expand due to multiple
6630                                                                 # matches (this can happen if an atom lacks a
6631                                                                 # category).
6632                                                                 show_invalid_depstring_notice(
6633                                                                         pkg, depstr, str(e))
6634                                                                 del e
6635                                                                 raise
6636                                                 finally:
6637                                                         portage.dep._dep_check_strict = True
6638                                                 if not success:
6639                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6640                                                         if replacement_pkg and \
6641                                                                 replacement_pkg[0].operation == "merge":
6642                                                                 # This package is being replaced anyway, so
6643                                                                 # ignore invalid dependencies so as not to
6644                                                                 # annoy the user too much (otherwise they'd be
6645                                                                 # forced to manually unmerge it first).
6646                                                                 continue
6647                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6648                                                         return False
6649                                                 blocker_atoms = [myatom for myatom in atoms \
6650                                                         if myatom.startswith("!")]
6651                                                 blocker_atoms.sort()
6652                                                 counter = long(pkg.metadata["COUNTER"])
6653                                                 blocker_cache[cpv] = \
6654                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6655                                         if blocker_atoms:
6656                                                 try:
6657                                                         for atom in blocker_atoms:
6658                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6659                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6660                                                                 self._blocker_parents.add(blocker, pkg)
6661                                                 except portage.exception.InvalidAtom, e:
6662                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6663                                                         show_invalid_depstring_notice(
6664                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6665                                                         return False
6666                                 for cpv in stale_cache:
6667                                         del blocker_cache[cpv]
6668                                 blocker_cache.flush()
6669                                 del blocker_cache
6670
6671                 # Discard any "uninstall" tasks scheduled by previous calls
6672                 # to this method, since those tasks may not make sense given
6673                 # the current graph state.
6674                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6675                 if previous_uninstall_tasks:
6676                         self._blocker_uninstalls = digraph()
6677                         self.digraph.difference_update(previous_uninstall_tasks)
6678
6679                 for blocker in self._blocker_parents.leaf_nodes():
6680                         self.spinner.update()
6681                         root_config = self.roots[blocker.root]
6682                         virtuals = root_config.settings.getvirtuals()
6683                         myroot = blocker.root
6684                         initial_db = self.trees[myroot]["vartree"].dbapi
6685                         final_db = self.mydbapi[myroot]
6686                         
6687                         provider_virtual = False
6688                         if blocker.cp in virtuals and \
6689                                 not self._have_new_virt(blocker.root, blocker.cp):
6690                                 provider_virtual = True
6691
6692                         if provider_virtual:
6693                                 atoms = []
6694                                 for provider_entry in virtuals[blocker.cp]:
6695                                         provider_cp = \
6696                                                 portage.dep_getkey(provider_entry)
6697                                         atoms.append(blocker.atom.replace(
6698                                                 blocker.cp, provider_cp))
6699                         else:
6700                                 atoms = [blocker.atom]
6701
6702                         blocked_initial = []
6703                         for atom in atoms:
6704                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6705
6706                         blocked_final = []
6707                         for atom in atoms:
6708                                 blocked_final.extend(final_db.match_pkgs(atom))
6709
6710                         if not blocked_initial and not blocked_final:
6711                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6712                                 self._blocker_parents.remove(blocker)
6713                                 # Discard any parents that don't have any more blockers.
6714                                 for pkg in parent_pkgs:
6715                                         self._irrelevant_blockers.add(blocker, pkg)
6716                                         if not self._blocker_parents.child_nodes(pkg):
6717                                                 self._blocker_parents.remove(pkg)
6718                                 continue
6719                         for parent in self._blocker_parents.parent_nodes(blocker):
6720                                 unresolved_blocks = False
6721                                 depends_on_order = set()
6722                                 for pkg in blocked_initial:
6723                                         if pkg.slot_atom == parent.slot_atom:
6724                                                 # TODO: Support blocks within slots in cases where it
6725                                                 # might make sense.  For example, a new version might
6726                                                 # require that the old version be uninstalled at build
6727                                                 # time.
6728                                                 continue
6729                                         if parent.installed:
6730                                                 # Two currently installed packages conflict with
6731                                                 # eachother. Ignore this case since the damage
6732                                                 # is already done and this would be likely to
6733                                                 # confuse users if displayed like a normal blocker.
6734                                                 continue
6735
6736                                         self._blocked_pkgs.add(pkg, blocker)
6737
6738                                         if parent.operation == "merge":
6739                                                 # Maybe the blocked package can be replaced or simply
6740                                                 # unmerged to resolve this block.
6741                                                 depends_on_order.add((pkg, parent))
6742                                                 continue
6743                                         # None of the above blocker resolutions techniques apply,
6744                                         # so apparently this one is unresolvable.
6745                                         unresolved_blocks = True
6746                                 for pkg in blocked_final:
6747                                         if pkg.slot_atom == parent.slot_atom:
6748                                                 # TODO: Support blocks within slots.
6749                                                 continue
6750                                         if parent.operation == "nomerge" and \
6751                                                 pkg.operation == "nomerge":
6752                                                 # This blocker will be handled the next time that a
6753                                                 # merge of either package is triggered.
6754                                                 continue
6755
6756                                         self._blocked_pkgs.add(pkg, blocker)
6757
6758                                         # Maybe the blocking package can be
6759                                         # unmerged to resolve this block.
6760                                         if parent.operation == "merge" and pkg.installed:
6761                                                 depends_on_order.add((pkg, parent))
6762                                                 continue
6763                                         elif parent.operation == "nomerge":
6764                                                 depends_on_order.add((parent, pkg))
6765                                                 continue
6766                                         # None of the above blocker resolutions techniques apply,
6767                                         # so apparently this one is unresolvable.
6768                                         unresolved_blocks = True
6769
6770                                 # Make sure we don't unmerge any package that have been pulled
6771                                 # into the graph.
6772                                 if not unresolved_blocks and depends_on_order:
6773                                         for inst_pkg, inst_task in depends_on_order:
6774                                                 if self.digraph.contains(inst_pkg) and \
6775                                                         self.digraph.parent_nodes(inst_pkg):
6776                                                         unresolved_blocks = True
6777                                                         break
6778
6779                                 if not unresolved_blocks and depends_on_order:
6780                                         for inst_pkg, inst_task in depends_on_order:
6781                                                 uninst_task = Package(built=inst_pkg.built,
6782                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6783                                                         metadata=inst_pkg.metadata,
6784                                                         operation="uninstall",
6785                                                         root_config=inst_pkg.root_config,
6786                                                         type_name=inst_pkg.type_name)
6787                                                 self._pkg_cache[uninst_task] = uninst_task
6788                                                 # Enforce correct merge order with a hard dep.
6789                                                 self.digraph.addnode(uninst_task, inst_task,
6790                                                         priority=BlockerDepPriority.instance)
6791                                                 # Count references to this blocker so that it can be
6792                                                 # invalidated after nodes referencing it have been
6793                                                 # merged.
6794                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6795                                 if not unresolved_blocks and not depends_on_order:
6796                                         self._irrelevant_blockers.add(blocker, parent)
6797                                         self._blocker_parents.remove_edge(blocker, parent)
6798                                         if not self._blocker_parents.parent_nodes(blocker):
6799                                                 self._blocker_parents.remove(blocker)
6800                                         if not self._blocker_parents.child_nodes(parent):
6801                                                 self._blocker_parents.remove(parent)
6802                                 if unresolved_blocks:
6803                                         self._unsolvable_blockers.add(blocker, parent)
6804
6805                 return True
6806
6807         def _accept_blocker_conflicts(self):
6808                 acceptable = False
6809                 for x in ("--buildpkgonly", "--fetchonly",
6810                         "--fetch-all-uri", "--nodeps"):
6811                         if x in self.myopts:
6812                                 acceptable = True
6813                                 break
6814                 return acceptable
6815
6816         def _merge_order_bias(self, mygraph):
6817                 """
6818                 For optimal leaf node selection, promote deep system runtime deps and
6819                 order nodes from highest to lowest overall reference count.
6820                 """
6821
6822                 node_info = {}
6823                 for node in mygraph.order:
6824                         node_info[node] = len(mygraph.parent_nodes(node))
6825                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6826
6827                 def cmp_merge_preference(node1, node2):
6828
6829                         if node1.operation == 'uninstall':
6830                                 if node2.operation == 'uninstall':
6831                                         return 0
6832                                 return 1
6833
6834                         if node2.operation == 'uninstall':
6835                                 if node1.operation == 'uninstall':
6836                                         return 0
6837                                 return -1
6838
6839                         node1_sys = node1 in deep_system_deps
6840                         node2_sys = node2 in deep_system_deps
6841                         if node1_sys != node2_sys:
6842                                 if node1_sys:
6843                                         return -1
6844                                 return 1
6845
6846                         return node_info[node2] - node_info[node1]
6847
6848                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6849
6850         def altlist(self, reversed=False):
6851
6852                 while self._serialized_tasks_cache is None:
6853                         self._resolve_conflicts()
6854                         try:
6855                                 self._serialized_tasks_cache, self._scheduler_graph = \
6856                                         self._serialize_tasks()
6857                         except self._serialize_tasks_retry:
6858                                 pass
6859
6860                 retlist = self._serialized_tasks_cache[:]
6861                 if reversed:
6862                         retlist.reverse()
6863                 return retlist
6864
6865         def schedulerGraph(self):
6866                 """
6867                 The scheduler graph is identical to the normal one except that
6868                 uninstall edges are reversed in specific cases that require
6869                 conflicting packages to be temporarily installed simultaneously.
6870                 This is intended for use by the Scheduler in it's parallelization
6871                 logic. It ensures that temporary simultaneous installation of
6872                 conflicting packages is avoided when appropriate (especially for
6873                 !!atom blockers), but allowed in specific cases that require it.
6874
6875                 Note that this method calls break_refs() which alters the state of
6876                 internal Package instances such that this depgraph instance should
6877                 not be used to perform any more calculations.
6878                 """
6879                 if self._scheduler_graph is None:
6880                         self.altlist()
6881                 self.break_refs(self._scheduler_graph.order)
6882                 return self._scheduler_graph
6883
6884         def break_refs(self, nodes):
6885                 """
6886                 Take a mergelist like that returned from self.altlist() and
6887                 break any references that lead back to the depgraph. This is
6888                 useful if you want to hold references to packages without
6889                 also holding the depgraph on the heap.
6890                 """
6891                 for node in nodes:
6892                         if hasattr(node, "root_config"):
6893                                 # The FakeVartree references the _package_cache which
6894                                 # references the depgraph. So that Package instances don't
6895                                 # hold the depgraph and FakeVartree on the heap, replace
6896                                 # the RootConfig that references the FakeVartree with the
6897                                 # original RootConfig instance which references the actual
6898                                 # vartree.
6899                                 node.root_config = \
6900                                         self._trees_orig[node.root_config.root]["root_config"]
6901
6902         def _resolve_conflicts(self):
6903                 if not self._complete_graph():
6904                         raise self._unknown_internal_error()
6905
6906                 if not self.validate_blockers():
6907                         raise self._unknown_internal_error()
6908
6909                 if self._slot_collision_info:
6910                         self._process_slot_conflicts()
6911
6912         def _serialize_tasks(self):
6913
6914                 if "--debug" in self.myopts:
6915                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6916                         self.digraph.debug_print()
6917                         writemsg("\n", noiselevel=-1)
6918
6919                 scheduler_graph = self.digraph.copy()
6920                 mygraph=self.digraph.copy()
6921                 # Prune "nomerge" root nodes if nothing depends on them, since
6922                 # otherwise they slow down merge order calculation. Don't remove
6923                 # non-root nodes since they help optimize merge order in some cases
6924                 # such as revdep-rebuild.
6925                 removed_nodes = set()
6926                 while True:
6927                         for node in mygraph.root_nodes():
6928                                 if not isinstance(node, Package) or \
6929                                         node.installed or node.onlydeps:
6930                                         removed_nodes.add(node)
6931                         if removed_nodes:
6932                                 self.spinner.update()
6933                                 mygraph.difference_update(removed_nodes)
6934                         if not removed_nodes:
6935                                 break
6936                         removed_nodes.clear()
6937                 self._merge_order_bias(mygraph)
6938                 def cmp_circular_bias(n1, n2):
6939                         """
6940                         RDEPEND is stronger than PDEPEND and this function
6941                         measures such a strength bias within a circular
6942                         dependency relationship.
6943                         """
6944                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6945                                 ignore_priority=priority_range.ignore_medium_soft)
6946                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6947                                 ignore_priority=priority_range.ignore_medium_soft)
6948                         if n1_n2_medium == n2_n1_medium:
6949                                 return 0
6950                         elif n1_n2_medium:
6951                                 return 1
6952                         return -1
6953                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6954                 retlist=[]
6955                 # Contains uninstall tasks that have been scheduled to
6956                 # occur after overlapping blockers have been installed.
6957                 scheduled_uninstalls = set()
6958                 # Contains any Uninstall tasks that have been ignored
6959                 # in order to avoid the circular deps code path. These
6960                 # correspond to blocker conflicts that could not be
6961                 # resolved.
6962                 ignored_uninstall_tasks = set()
6963                 have_uninstall_task = False
6964                 complete = "complete" in self.myparams
6965                 asap_nodes = []
6966
6967                 def get_nodes(**kwargs):
6968                         """
6969                         Returns leaf nodes excluding Uninstall instances
6970                         since those should be executed as late as possible.
6971                         """
6972                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6973                                 if isinstance(node, Package) and \
6974                                         (node.operation != "uninstall" or \
6975                                         node in scheduled_uninstalls)]
6976
6977                 # sys-apps/portage needs special treatment if ROOT="/"
6978                 running_root = self._running_root.root
6979                 from portage.const import PORTAGE_PACKAGE_ATOM
6980                 runtime_deps = InternalPackageSet(
6981                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6982                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6983                         PORTAGE_PACKAGE_ATOM)
6984                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6985                         PORTAGE_PACKAGE_ATOM)
6986
6987                 if running_portage:
6988                         running_portage = running_portage[0]
6989                 else:
6990                         running_portage = None
6991
6992                 if replacement_portage:
6993                         replacement_portage = replacement_portage[0]
6994                 else:
6995                         replacement_portage = None
6996
6997                 if replacement_portage == running_portage:
6998                         replacement_portage = None
6999
7000                 if replacement_portage is not None:
7001                         # update from running_portage to replacement_portage asap
7002                         asap_nodes.append(replacement_portage)
7003
7004                 if running_portage is not None:
7005                         try:
7006                                 portage_rdepend = self._select_atoms_highest_available(
7007                                         running_root, running_portage.metadata["RDEPEND"],
7008                                         myuse=running_portage.use.enabled,
7009                                         parent=running_portage, strict=False)
7010                         except portage.exception.InvalidDependString, e:
7011                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7012                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7013                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7014                                 del e
7015                                 portage_rdepend = []
7016                         runtime_deps.update(atom for atom in portage_rdepend \
7017                                 if not atom.startswith("!"))
7018
7019                 def gather_deps(ignore_priority, mergeable_nodes,
7020                         selected_nodes, node):
7021                         """
7022                         Recursively gather a group of nodes that RDEPEND on
7023                         eachother. This ensures that they are merged as a group
7024                         and get their RDEPENDs satisfied as soon as possible.
7025                         """
7026                         if node in selected_nodes:
7027                                 return True
7028                         if node not in mergeable_nodes:
7029                                 return False
7030                         if node == replacement_portage and \
7031                                 mygraph.child_nodes(node,
7032                                 ignore_priority=priority_range.ignore_medium_soft):
7033                                 # Make sure that portage always has all of it's
7034                                 # RDEPENDs installed first.
7035                                 return False
7036                         selected_nodes.add(node)
7037                         for child in mygraph.child_nodes(node,
7038                                 ignore_priority=ignore_priority):
7039                                 if not gather_deps(ignore_priority,
7040                                         mergeable_nodes, selected_nodes, child):
7041                                         return False
7042                         return True
7043
7044                 def ignore_uninst_or_med(priority):
7045                         if priority is BlockerDepPriority.instance:
7046                                 return True
7047                         return priority_range.ignore_medium(priority)
7048
7049                 def ignore_uninst_or_med_soft(priority):
7050                         if priority is BlockerDepPriority.instance:
7051                                 return True
7052                         return priority_range.ignore_medium_soft(priority)
7053
7054                 tree_mode = "--tree" in self.myopts
7055                 # Tracks whether or not the current iteration should prefer asap_nodes
7056                 # if available.  This is set to False when the previous iteration
7057                 # failed to select any nodes.  It is reset whenever nodes are
7058                 # successfully selected.
7059                 prefer_asap = True
7060
7061                 # Controls whether or not the current iteration should drop edges that
7062                 # are "satisfied" by installed packages, in order to solve circular
7063                 # dependencies. The deep runtime dependencies of installed packages are
7064                 # not checked in this case (bug #199856), so it must be avoided
7065                 # whenever possible.
7066                 drop_satisfied = False
7067
7068                 # State of variables for successive iterations that loosen the
7069                 # criteria for node selection.
7070                 #
7071                 # iteration   prefer_asap   drop_satisfied
7072                 # 1           True          False
7073                 # 2           False         False
7074                 # 3           False         True
7075                 #
7076                 # If no nodes are selected on the last iteration, it is due to
7077                 # unresolved blockers or circular dependencies.
7078
7079                 while not mygraph.empty():
7080                         self.spinner.update()
7081                         selected_nodes = None
7082                         ignore_priority = None
7083                         if drop_satisfied or (prefer_asap and asap_nodes):
7084                                 priority_range = DepPrioritySatisfiedRange
7085                         else:
7086                                 priority_range = DepPriorityNormalRange
7087                         if prefer_asap and asap_nodes:
7088                                 # ASAP nodes are merged before their soft deps. Go ahead and
7089                                 # select root nodes here if necessary, since it's typical for
7090                                 # the parent to have been removed from the graph already.
7091                                 asap_nodes = [node for node in asap_nodes \
7092                                         if mygraph.contains(node)]
7093                                 for node in asap_nodes:
7094                                         if not mygraph.child_nodes(node,
7095                                                 ignore_priority=priority_range.ignore_soft):
7096                                                 selected_nodes = [node]
7097                                                 asap_nodes.remove(node)
7098                                                 break
7099                         if not selected_nodes and \
7100                                 not (prefer_asap and asap_nodes):
7101                                 for i in xrange(priority_range.NONE,
7102                                         priority_range.MEDIUM_SOFT + 1):
7103                                         ignore_priority = priority_range.ignore_priority[i]
7104                                         nodes = get_nodes(ignore_priority=ignore_priority)
7105                                         if nodes:
7106                                                 # If there is a mix of uninstall nodes with other
7107                                                 # types, save the uninstall nodes for later since
7108                                                 # sometimes a merge node will render an uninstall
7109                                                 # node unnecessary (due to occupying the same slot),
7110                                                 # and we want to avoid executing a separate uninstall
7111                                                 # task in that case.
7112                                                 if len(nodes) > 1:
7113                                                         good_uninstalls = []
7114                                                         with_some_uninstalls_excluded = []
7115                                                         for node in nodes:
7116                                                                 if node.operation == "uninstall":
7117                                                                         slot_node = self.mydbapi[node.root
7118                                                                                 ].match_pkgs(node.slot_atom)
7119                                                                         if slot_node and \
7120                                                                                 slot_node[0].operation == "merge":
7121                                                                                 continue
7122                                                                         good_uninstalls.append(node)
7123                                                                 with_some_uninstalls_excluded.append(node)
7124                                                         if good_uninstalls:
7125                                                                 nodes = good_uninstalls
7126                                                         elif with_some_uninstalls_excluded:
7127                                                                 nodes = with_some_uninstalls_excluded
7128                                                         else:
7129                                                                 nodes = nodes
7130
7131                                                 if ignore_priority is None and not tree_mode:
7132                                                         # Greedily pop all of these nodes since no
7133                                                         # relationship has been ignored. This optimization
7134                                                         # destroys --tree output, so it's disabled in tree
7135                                                         # mode.
7136                                                         selected_nodes = nodes
7137                                                 else:
7138                                                         # For optimal merge order:
7139                                                         #  * Only pop one node.
7140                                                         #  * Removing a root node (node without a parent)
7141                                                         #    will not produce a leaf node, so avoid it.
7142                                                         #  * It's normal for a selected uninstall to be a
7143                                                         #    root node, so don't check them for parents.
7144                                                         for node in nodes:
7145                                                                 if node.operation == "uninstall" or \
7146                                                                         mygraph.parent_nodes(node):
7147                                                                         selected_nodes = [node]
7148                                                                         break
7149
7150                                                 if selected_nodes:
7151                                                         break
7152
7153                         if not selected_nodes:
7154                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7155                                 if nodes:
7156                                         mergeable_nodes = set(nodes)
7157                                         if prefer_asap and asap_nodes:
7158                                                 nodes = asap_nodes
7159                                         for i in xrange(priority_range.SOFT,
7160                                                 priority_range.MEDIUM_SOFT + 1):
7161                                                 ignore_priority = priority_range.ignore_priority[i]
7162                                                 for node in nodes:
7163                                                         if not mygraph.parent_nodes(node):
7164                                                                 continue
7165                                                         selected_nodes = set()
7166                                                         if gather_deps(ignore_priority,
7167                                                                 mergeable_nodes, selected_nodes, node):
7168                                                                 break
7169                                                         else:
7170                                                                 selected_nodes = None
7171                                                 if selected_nodes:
7172                                                         break
7173
7174                                         if prefer_asap and asap_nodes and not selected_nodes:
7175                                                 # We failed to find any asap nodes to merge, so ignore
7176                                                 # them for the next iteration.
7177                                                 prefer_asap = False
7178                                                 continue
7179
7180                         if selected_nodes and ignore_priority is not None:
7181                                 # Try to merge ignored medium_soft deps as soon as possible
7182                                 # if they're not satisfied by installed packages.
7183                                 for node in selected_nodes:
7184                                         children = set(mygraph.child_nodes(node))
7185                                         soft = children.difference(
7186                                                 mygraph.child_nodes(node,
7187                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7188                                         medium_soft = children.difference(
7189                                                 mygraph.child_nodes(node,
7190                                                         ignore_priority = \
7191                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7192                                         medium_soft.difference_update(soft)
7193                                         for child in medium_soft:
7194                                                 if child in selected_nodes:
7195                                                         continue
7196                                                 if child in asap_nodes:
7197                                                         continue
7198                                                 asap_nodes.append(child)
7199
7200                         if selected_nodes and len(selected_nodes) > 1:
7201                                 if not isinstance(selected_nodes, list):
7202                                         selected_nodes = list(selected_nodes)
7203                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7204
7205                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7206                                 # An Uninstall task needs to be executed in order to
7207                                 # avoid conflict if possible.
7208
7209                                 if drop_satisfied:
7210                                         priority_range = DepPrioritySatisfiedRange
7211                                 else:
7212                                         priority_range = DepPriorityNormalRange
7213
7214                                 mergeable_nodes = get_nodes(
7215                                         ignore_priority=ignore_uninst_or_med)
7216
7217                                 min_parent_deps = None
7218                                 uninst_task = None
7219                                 for task in myblocker_uninstalls.leaf_nodes():
7220                                         # Do some sanity checks so that system or world packages
7221                                         # don't get uninstalled inappropriately here (only really
7222                                         # necessary when --complete-graph has not been enabled).
7223
7224                                         if task in ignored_uninstall_tasks:
7225                                                 continue
7226
7227                                         if task in scheduled_uninstalls:
7228                                                 # It's been scheduled but it hasn't
7229                                                 # been executed yet due to dependence
7230                                                 # on installation of blocking packages.
7231                                                 continue
7232
7233                                         root_config = self.roots[task.root]
7234                                         inst_pkg = self._pkg_cache[
7235                                                 ("installed", task.root, task.cpv, "nomerge")]
7236
7237                                         if self.digraph.contains(inst_pkg):
7238                                                 continue
7239
7240                                         forbid_overlap = False
7241                                         heuristic_overlap = False
7242                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7243                                                 if blocker.eapi in ("0", "1"):
7244                                                         heuristic_overlap = True
7245                                                 elif blocker.atom.blocker.overlap.forbid:
7246                                                         forbid_overlap = True
7247                                                         break
7248                                         if forbid_overlap and running_root == task.root:
7249                                                 continue
7250
7251                                         if heuristic_overlap and running_root == task.root:
7252                                                 # Never uninstall sys-apps/portage or it's essential
7253                                                 # dependencies, except through replacement.
7254                                                 try:
7255                                                         runtime_dep_atoms = \
7256                                                                 list(runtime_deps.iterAtomsForPackage(task))
7257                                                 except portage.exception.InvalidDependString, e:
7258                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7259                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7260                                                                 (task.root, task.cpv, e), noiselevel=-1)
7261                                                         del e
7262                                                         continue
7263
7264                                                 # Don't uninstall a runtime dep if it appears
7265                                                 # to be the only suitable one installed.
7266                                                 skip = False
7267                                                 vardb = root_config.trees["vartree"].dbapi
7268                                                 for atom in runtime_dep_atoms:
7269                                                         other_version = None
7270                                                         for pkg in vardb.match_pkgs(atom):
7271                                                                 if pkg.cpv == task.cpv and \
7272                                                                         pkg.metadata["COUNTER"] == \
7273                                                                         task.metadata["COUNTER"]:
7274                                                                         continue
7275                                                                 other_version = pkg
7276                                                                 break
7277                                                         if other_version is None:
7278                                                                 skip = True
7279                                                                 break
7280                                                 if skip:
7281                                                         continue
7282
7283                                                 # For packages in the system set, don't take
7284                                                 # any chances. If the conflict can't be resolved
7285                                                 # by a normal replacement operation then abort.
7286                                                 skip = False
7287                                                 try:
7288                                                         for atom in root_config.sets[
7289                                                                 "system"].iterAtomsForPackage(task):
7290                                                                 skip = True
7291                                                                 break
7292                                                 except portage.exception.InvalidDependString, e:
7293                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7294                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7295                                                                 (task.root, task.cpv, e), noiselevel=-1)
7296                                                         del e
7297                                                         skip = True
7298                                                 if skip:
7299                                                         continue
7300
7301                                         # Note that the world check isn't always
7302                                         # necessary since self._complete_graph() will
7303                                         # add all packages from the system and world sets to the
7304                                         # graph. This just allows unresolved conflicts to be
7305                                         # detected as early as possible, which makes it possible
7306                                         # to avoid calling self._complete_graph() when it is
7307                                         # unnecessary due to blockers triggering an abortion.
7308                                         if not complete:
7309                                                 # For packages in the world set, go ahead an uninstall
7310                                                 # when necessary, as long as the atom will be satisfied
7311                                                 # in the final state.
7312                                                 graph_db = self.mydbapi[task.root]
7313                                                 skip = False
7314                                                 try:
7315                                                         for atom in root_config.sets[
7316                                                                 "world"].iterAtomsForPackage(task):
7317                                                                 satisfied = False
7318                                                                 for pkg in graph_db.match_pkgs(atom):
7319                                                                         if pkg == inst_pkg:
7320                                                                                 continue
7321                                                                         satisfied = True
7322                                                                         break
7323                                                                 if not satisfied:
7324                                                                         skip = True
7325                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7326                                                                         break
7327                                                 except portage.exception.InvalidDependString, e:
7328                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7329                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7330                                                                 (task.root, task.cpv, e), noiselevel=-1)
7331                                                         del e
7332                                                         skip = True
7333                                                 if skip:
7334                                                         continue
7335
7336                                         # Check the deps of parent nodes to ensure that
7337                                         # the chosen task produces a leaf node. Maybe
7338                                         # this can be optimized some more to make the
7339                                         # best possible choice, but the current algorithm
7340                                         # is simple and should be near optimal for most
7341                                         # common cases.
7342                                         mergeable_parent = False
7343                                         parent_deps = set()
7344                                         for parent in mygraph.parent_nodes(task):
7345                                                 parent_deps.update(mygraph.child_nodes(parent,
7346                                                         ignore_priority=priority_range.ignore_medium_soft))
7347                                                 if parent in mergeable_nodes and \
7348                                                         gather_deps(ignore_uninst_or_med_soft,
7349                                                         mergeable_nodes, set(), parent):
7350                                                         mergeable_parent = True
7351
7352                                         if not mergeable_parent:
7353                                                 continue
7354
7355                                         parent_deps.remove(task)
7356                                         if min_parent_deps is None or \
7357                                                 len(parent_deps) < min_parent_deps:
7358                                                 min_parent_deps = len(parent_deps)
7359                                                 uninst_task = task
7360
7361                                 if uninst_task is not None:
7362                                         # The uninstall is performed only after blocking
7363                                         # packages have been merged on top of it. File
7364                                         # collisions between blocking packages are detected
7365                                         # and removed from the list of files to be uninstalled.
7366                                         scheduled_uninstalls.add(uninst_task)
7367                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7368
7369                                         # Reverse the parent -> uninstall edges since we want
7370                                         # to do the uninstall after blocking packages have
7371                                         # been merged on top of it.
7372                                         mygraph.remove(uninst_task)
7373                                         for blocked_pkg in parent_nodes:
7374                                                 mygraph.add(blocked_pkg, uninst_task,
7375                                                         priority=BlockerDepPriority.instance)
7376                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7377                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7378                                                         priority=BlockerDepPriority.instance)
7379
7380                                         # Reset the state variables for leaf node selection and
7381                                         # continue trying to select leaf nodes.
7382                                         prefer_asap = True
7383                                         drop_satisfied = False
7384                                         continue
7385
7386                         if not selected_nodes:
7387                                 # Only select root nodes as a last resort. This case should
7388                                 # only trigger when the graph is nearly empty and the only
7389                                 # remaining nodes are isolated (no parents or children). Since
7390                                 # the nodes must be isolated, ignore_priority is not needed.
7391                                 selected_nodes = get_nodes()
7392
7393                         if not selected_nodes and not drop_satisfied:
7394                                 drop_satisfied = True
7395                                 continue
7396
7397                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7398                                 # If possible, drop an uninstall task here in order to avoid
7399                                 # the circular deps code path. The corresponding blocker will
7400                                 # still be counted as an unresolved conflict.
7401                                 uninst_task = None
7402                                 for node in myblocker_uninstalls.leaf_nodes():
7403                                         try:
7404                                                 mygraph.remove(node)
7405                                         except KeyError:
7406                                                 pass
7407                                         else:
7408                                                 uninst_task = node
7409                                                 ignored_uninstall_tasks.add(node)
7410                                                 break
7411
7412                                 if uninst_task is not None:
7413                                         # Reset the state variables for leaf node selection and
7414                                         # continue trying to select leaf nodes.
7415                                         prefer_asap = True
7416                                         drop_satisfied = False
7417                                         continue
7418
7419                         if not selected_nodes:
7420                                 self._circular_deps_for_display = mygraph
7421                                 raise self._unknown_internal_error()
7422
7423                         # At this point, we've succeeded in selecting one or more nodes, so
7424                         # reset state variables for leaf node selection.
7425                         prefer_asap = True
7426                         drop_satisfied = False
7427
7428                         mygraph.difference_update(selected_nodes)
7429
7430                         for node in selected_nodes:
7431                                 if isinstance(node, Package) and \
7432                                         node.operation == "nomerge":
7433                                         continue
7434
7435                                 # Handle interactions between blockers
7436                                 # and uninstallation tasks.
7437                                 solved_blockers = set()
7438                                 uninst_task = None
7439                                 if isinstance(node, Package) and \
7440                                         "uninstall" == node.operation:
7441                                         have_uninstall_task = True
7442                                         uninst_task = node
7443                                 else:
7444                                         vardb = self.trees[node.root]["vartree"].dbapi
7445                                         previous_cpv = vardb.match(node.slot_atom)
7446                                         if previous_cpv:
7447                                                 # The package will be replaced by this one, so remove
7448                                                 # the corresponding Uninstall task if necessary.
7449                                                 previous_cpv = previous_cpv[0]
7450                                                 uninst_task = \
7451                                                         ("installed", node.root, previous_cpv, "uninstall")
7452                                                 try:
7453                                                         mygraph.remove(uninst_task)
7454                                                 except KeyError:
7455                                                         pass
7456
7457                                 if uninst_task is not None and \
7458                                         uninst_task not in ignored_uninstall_tasks and \
7459                                         myblocker_uninstalls.contains(uninst_task):
7460                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7461                                         myblocker_uninstalls.remove(uninst_task)
7462                                         # Discard any blockers that this Uninstall solves.
7463                                         for blocker in blocker_nodes:
7464                                                 if not myblocker_uninstalls.child_nodes(blocker):
7465                                                         myblocker_uninstalls.remove(blocker)
7466                                                         solved_blockers.add(blocker)
7467
7468                                 retlist.append(node)
7469
7470                                 if (isinstance(node, Package) and \
7471                                         "uninstall" == node.operation) or \
7472                                         (uninst_task is not None and \
7473                                         uninst_task in scheduled_uninstalls):
7474                                         # Include satisfied blockers in the merge list
7475                                         # since the user might be interested and also
7476                                         # it serves as an indicator that blocking packages
7477                                         # will be temporarily installed simultaneously.
7478                                         for blocker in solved_blockers:
7479                                                 retlist.append(Blocker(atom=blocker.atom,
7480                                                         root=blocker.root, eapi=blocker.eapi,
7481                                                         satisfied=True))
7482
7483                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7484                 for node in myblocker_uninstalls.root_nodes():
7485                         unsolvable_blockers.add(node)
7486
7487                 for blocker in unsolvable_blockers:
7488                         retlist.append(blocker)
7489
7490                 # If any Uninstall tasks need to be executed in order
7491                 # to avoid a conflict, complete the graph with any
7492                 # dependencies that may have been initially
7493                 # neglected (to ensure that unsafe Uninstall tasks
7494                 # are properly identified and blocked from execution).
7495                 if have_uninstall_task and \
7496                         not complete and \
7497                         not unsolvable_blockers:
7498                         self.myparams.add("complete")
7499                         raise self._serialize_tasks_retry("")
7500
7501                 if unsolvable_blockers and \
7502                         not self._accept_blocker_conflicts():
7503                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7504                         self._serialized_tasks_cache = retlist[:]
7505                         self._scheduler_graph = scheduler_graph
7506                         raise self._unknown_internal_error()
7507
7508                 if self._slot_collision_info and \
7509                         not self._accept_blocker_conflicts():
7510                         self._serialized_tasks_cache = retlist[:]
7511                         self._scheduler_graph = scheduler_graph
7512                         raise self._unknown_internal_error()
7513
7514                 return retlist, scheduler_graph
7515
7516         def _show_circular_deps(self, mygraph):
7517                 # No leaf nodes are available, so we have a circular
7518                 # dependency panic situation.  Reduce the noise level to a
7519                 # minimum via repeated elimination of root nodes since they
7520                 # have no parents and thus can not be part of a cycle.
7521                 while True:
7522                         root_nodes = mygraph.root_nodes(
7523                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7524                         if not root_nodes:
7525                                 break
7526                         mygraph.difference_update(root_nodes)
7527                 # Display the USE flags that are enabled on nodes that are part
7528                 # of dependency cycles in case that helps the user decide to
7529                 # disable some of them.
7530                 display_order = []
7531                 tempgraph = mygraph.copy()
7532                 while not tempgraph.empty():
7533                         nodes = tempgraph.leaf_nodes()
7534                         if not nodes:
7535                                 node = tempgraph.order[0]
7536                         else:
7537                                 node = nodes[0]
7538                         display_order.append(node)
7539                         tempgraph.remove(node)
7540                 display_order.reverse()
7541                 self.myopts.pop("--quiet", None)
7542                 self.myopts.pop("--verbose", None)
7543                 self.myopts["--tree"] = True
7544                 portage.writemsg("\n\n", noiselevel=-1)
7545                 self.display(display_order)
7546                 prefix = colorize("BAD", " * ")
7547                 portage.writemsg("\n", noiselevel=-1)
7548                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7549                         noiselevel=-1)
7550                 portage.writemsg("\n", noiselevel=-1)
7551                 mygraph.debug_print()
7552                 portage.writemsg("\n", noiselevel=-1)
7553                 portage.writemsg(prefix + "Note that circular dependencies " + \
7554                         "can often be avoided by temporarily\n", noiselevel=-1)
7555                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7556                         "optional dependencies.\n", noiselevel=-1)
7557
7558         def _show_merge_list(self):
7559                 if self._serialized_tasks_cache is not None and \
7560                         not (self._displayed_list and \
7561                         (self._displayed_list == self._serialized_tasks_cache or \
7562                         self._displayed_list == \
7563                                 list(reversed(self._serialized_tasks_cache)))):
7564                         display_list = self._serialized_tasks_cache[:]
7565                         if "--tree" in self.myopts:
7566                                 display_list.reverse()
7567                         self.display(display_list)
7568
7569         def _show_unsatisfied_blockers(self, blockers):
7570                 self._show_merge_list()
7571                 msg = "Error: The above package list contains " + \
7572                         "packages which cannot be installed " + \
7573                         "at the same time on the same system."
7574                 prefix = colorize("BAD", " * ")
7575                 from textwrap import wrap
7576                 portage.writemsg("\n", noiselevel=-1)
7577                 for line in wrap(msg, 70):
7578                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7579
7580                 # Display the conflicting packages along with the packages
7581                 # that pulled them in. This is helpful for troubleshooting
7582                 # cases in which blockers don't solve automatically and
7583                 # the reasons are not apparent from the normal merge list
7584                 # display.
7585
7586                 conflict_pkgs = {}
7587                 for blocker in blockers:
7588                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7589                                 self._blocker_parents.parent_nodes(blocker)):
7590                                 parent_atoms = self._parent_atoms.get(pkg)
7591                                 if not parent_atoms:
7592                                         atom = self._blocked_world_pkgs.get(pkg)
7593                                         if atom is not None:
7594                                                 parent_atoms = set([("@world", atom)])
7595                                 if parent_atoms:
7596                                         conflict_pkgs[pkg] = parent_atoms
7597
7598                 if conflict_pkgs:
7599                         # Reduce noise by pruning packages that are only
7600                         # pulled in by other conflict packages.
7601                         pruned_pkgs = set()
7602                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7603                                 relevant_parent = False
7604                                 for parent, atom in parent_atoms:
7605                                         if parent not in conflict_pkgs:
7606                                                 relevant_parent = True
7607                                                 break
7608                                 if not relevant_parent:
7609                                         pruned_pkgs.add(pkg)
7610                         for pkg in pruned_pkgs:
7611                                 del conflict_pkgs[pkg]
7612
7613                 if conflict_pkgs:
7614                         msg = []
7615                         msg.append("\n")
7616                         indent = "  "
7617                         # Max number of parents shown, to avoid flooding the display.
7618                         max_parents = 3
7619                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7620
7621                                 pruned_list = set()
7622
7623                                 # Prefer packages that are not directly involved in a conflict.
7624                                 for parent_atom in parent_atoms:
7625                                         if len(pruned_list) >= max_parents:
7626                                                 break
7627                                         parent, atom = parent_atom
7628                                         if parent not in conflict_pkgs:
7629                                                 pruned_list.add(parent_atom)
7630
7631                                 for parent_atom in parent_atoms:
7632                                         if len(pruned_list) >= max_parents:
7633                                                 break
7634                                         pruned_list.add(parent_atom)
7635
7636                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7637                                 msg.append(indent + "%s pulled in by\n" % pkg)
7638
7639                                 for parent_atom in pruned_list:
7640                                         parent, atom = parent_atom
7641                                         msg.append(2*indent)
7642                                         if isinstance(parent,
7643                                                 (PackageArg, AtomArg)):
7644                                                 # For PackageArg and AtomArg types, it's
7645                                                 # redundant to display the atom attribute.
7646                                                 msg.append(str(parent))
7647                                         else:
7648                                                 # Display the specific atom from SetArg or
7649                                                 # Package types.
7650                                                 msg.append("%s required by %s" % (atom, parent))
7651                                         msg.append("\n")
7652
7653                                 if omitted_parents:
7654                                         msg.append(2*indent)
7655                                         msg.append("(and %d more)\n" % omitted_parents)
7656
7657                                 msg.append("\n")
7658
7659                         sys.stderr.write("".join(msg))
7660                         sys.stderr.flush()
7661
7662                 if "--quiet" not in self.myopts:
7663                         show_blocker_docs_link()
7664
7665         def display(self, mylist, favorites=[], verbosity=None):
7666
7667                 # This is used to prevent display_problems() from
7668                 # redundantly displaying this exact same merge list
7669                 # again via _show_merge_list().
7670                 self._displayed_list = mylist
7671
7672                 if verbosity is None:
7673                         verbosity = ("--quiet" in self.myopts and 1 or \
7674                                 "--verbose" in self.myopts and 3 or 2)
7675                 favorites_set = InternalPackageSet(favorites)
7676                 oneshot = "--oneshot" in self.myopts or \
7677                         "--onlydeps" in self.myopts
7678                 columns = "--columns" in self.myopts
7679                 changelogs=[]
7680                 p=[]
7681                 blockers = []
7682
7683                 counters = PackageCounters()
7684
7685                 if verbosity == 1 and "--verbose" not in self.myopts:
7686                         def create_use_string(*args):
7687                                 return ""
7688                 else:
7689                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7690                                 old_iuse, old_use,
7691                                 is_new, reinst_flags,
7692                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7693                                 alphabetical=("--alphabetical" in self.myopts)):
7694                                 enabled = []
7695                                 if alphabetical:
7696                                         disabled = enabled
7697                                         removed = enabled
7698                                 else:
7699                                         disabled = []
7700                                         removed = []
7701                                 cur_iuse = set(cur_iuse)
7702                                 enabled_flags = cur_iuse.intersection(cur_use)
7703                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7704                                 any_iuse = cur_iuse.union(old_iuse)
7705                                 any_iuse = list(any_iuse)
7706                                 any_iuse.sort()
7707                                 for flag in any_iuse:
7708                                         flag_str = None
7709                                         isEnabled = False
7710                                         reinst_flag = reinst_flags and flag in reinst_flags
7711                                         if flag in enabled_flags:
7712                                                 isEnabled = True
7713                                                 if is_new or flag in old_use and \
7714                                                         (all_flags or reinst_flag):
7715                                                         flag_str = red(flag)
7716                                                 elif flag not in old_iuse:
7717                                                         flag_str = yellow(flag) + "%*"
7718                                                 elif flag not in old_use:
7719                                                         flag_str = green(flag) + "*"
7720                                         elif flag in removed_iuse:
7721                                                 if all_flags or reinst_flag:
7722                                                         flag_str = yellow("-" + flag) + "%"
7723                                                         if flag in old_use:
7724                                                                 flag_str += "*"
7725                                                         flag_str = "(" + flag_str + ")"
7726                                                         removed.append(flag_str)
7727                                                 continue
7728                                         else:
7729                                                 if is_new or flag in old_iuse and \
7730                                                         flag not in old_use and \
7731                                                         (all_flags or reinst_flag):
7732                                                         flag_str = blue("-" + flag)
7733                                                 elif flag not in old_iuse:
7734                                                         flag_str = yellow("-" + flag)
7735                                                         if flag not in iuse_forced:
7736                                                                 flag_str += "%"
7737                                                 elif flag in old_use:
7738                                                         flag_str = green("-" + flag) + "*"
7739                                         if flag_str:
7740                                                 if flag in iuse_forced:
7741                                                         flag_str = "(" + flag_str + ")"
7742                                                 if isEnabled:
7743                                                         enabled.append(flag_str)
7744                                                 else:
7745                                                         disabled.append(flag_str)
7746
7747                                 if alphabetical:
7748                                         ret = " ".join(enabled)
7749                                 else:
7750                                         ret = " ".join(enabled + disabled + removed)
7751                                 if ret:
7752                                         ret = '%s="%s" ' % (name, ret)
7753                                 return ret
7754
7755                 repo_display = RepoDisplay(self.roots)
7756
7757                 tree_nodes = []
7758                 display_list = []
7759                 mygraph = self.digraph.copy()
7760
7761                 # If there are any Uninstall instances, add the corresponding
7762                 # blockers to the digraph (useful for --tree display).
7763
7764                 executed_uninstalls = set(node for node in mylist \
7765                         if isinstance(node, Package) and node.operation == "unmerge")
7766
7767                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7768                         uninstall_parents = \
7769                                 self._blocker_uninstalls.parent_nodes(uninstall)
7770                         if not uninstall_parents:
7771                                 continue
7772
7773                         # Remove the corresponding "nomerge" node and substitute
7774                         # the Uninstall node.
7775                         inst_pkg = self._pkg_cache[
7776                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7777                         try:
7778                                 mygraph.remove(inst_pkg)
7779                         except KeyError:
7780                                 pass
7781
7782                         try:
7783                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7784                         except KeyError:
7785                                 inst_pkg_blockers = []
7786
7787                         # Break the Package -> Uninstall edges.
7788                         mygraph.remove(uninstall)
7789
7790                         # Resolution of a package's blockers
7791                         # depend on it's own uninstallation.
7792                         for blocker in inst_pkg_blockers:
7793                                 mygraph.add(uninstall, blocker)
7794
7795                         # Expand Package -> Uninstall edges into
7796                         # Package -> Blocker -> Uninstall edges.
7797                         for blocker in uninstall_parents:
7798                                 mygraph.add(uninstall, blocker)
7799                                 for parent in self._blocker_parents.parent_nodes(blocker):
7800                                         if parent != inst_pkg:
7801                                                 mygraph.add(blocker, parent)
7802
7803                         # If the uninstall task did not need to be executed because
7804                         # of an upgrade, display Blocker -> Upgrade edges since the
7805                         # corresponding Blocker -> Uninstall edges will not be shown.
7806                         upgrade_node = \
7807                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7808                         if upgrade_node is not None and \
7809                                 uninstall not in executed_uninstalls:
7810                                 for blocker in uninstall_parents:
7811                                         mygraph.add(upgrade_node, blocker)
7812
7813                 unsatisfied_blockers = []
7814                 i = 0
7815                 depth = 0
7816                 shown_edges = set()
7817                 for x in mylist:
7818                         if isinstance(x, Blocker) and not x.satisfied:
7819                                 unsatisfied_blockers.append(x)
7820                                 continue
7821                         graph_key = x
7822                         if "--tree" in self.myopts:
7823                                 depth = len(tree_nodes)
7824                                 while depth and graph_key not in \
7825                                         mygraph.child_nodes(tree_nodes[depth-1]):
7826                                                 depth -= 1
7827                                 if depth:
7828                                         tree_nodes = tree_nodes[:depth]
7829                                         tree_nodes.append(graph_key)
7830                                         display_list.append((x, depth, True))
7831                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7832                                 else:
7833                                         traversed_nodes = set() # prevent endless circles
7834                                         traversed_nodes.add(graph_key)
7835                                         def add_parents(current_node, ordered):
7836                                                 parent_nodes = None
7837                                                 # Do not traverse to parents if this node is an
7838                                                 # an argument or a direct member of a set that has
7839                                                 # been specified as an argument (system or world).
7840                                                 if current_node not in self._set_nodes:
7841                                                         parent_nodes = mygraph.parent_nodes(current_node)
7842                                                 if parent_nodes:
7843                                                         child_nodes = set(mygraph.child_nodes(current_node))
7844                                                         selected_parent = None
7845                                                         # First, try to avoid a direct cycle.
7846                                                         for node in parent_nodes:
7847                                                                 if not isinstance(node, (Blocker, Package)):
7848                                                                         continue
7849                                                                 if node not in traversed_nodes and \
7850                                                                         node not in child_nodes:
7851                                                                         edge = (current_node, node)
7852                                                                         if edge in shown_edges:
7853                                                                                 continue
7854                                                                         selected_parent = node
7855                                                                         break
7856                                                         if not selected_parent:
7857                                                                 # A direct cycle is unavoidable.
7858                                                                 for node in parent_nodes:
7859                                                                         if not isinstance(node, (Blocker, Package)):
7860                                                                                 continue
7861                                                                         if node not in traversed_nodes:
7862                                                                                 edge = (current_node, node)
7863                                                                                 if edge in shown_edges:
7864                                                                                         continue
7865                                                                                 selected_parent = node
7866                                                                                 break
7867                                                         if selected_parent:
7868                                                                 shown_edges.add((current_node, selected_parent))
7869                                                                 traversed_nodes.add(selected_parent)
7870                                                                 add_parents(selected_parent, False)
7871                                                 display_list.append((current_node,
7872                                                         len(tree_nodes), ordered))
7873                                                 tree_nodes.append(current_node)
7874                                         tree_nodes = []
7875                                         add_parents(graph_key, True)
7876                         else:
7877                                 display_list.append((x, depth, True))
7878                 mylist = display_list
7879                 for x in unsatisfied_blockers:
7880                         mylist.append((x, 0, True))
7881
7882                 last_merge_depth = 0
7883                 for i in xrange(len(mylist)-1,-1,-1):
7884                         graph_key, depth, ordered = mylist[i]
7885                         if not ordered and depth == 0 and i > 0 \
7886                                 and graph_key == mylist[i-1][0] and \
7887                                 mylist[i-1][1] == 0:
7888                                 # An ordered node got a consecutive duplicate when the tree was
7889                                 # being filled in.
7890                                 del mylist[i]
7891                                 continue
7892                         if ordered and graph_key[-1] != "nomerge":
7893                                 last_merge_depth = depth
7894                                 continue
7895                         if depth >= last_merge_depth or \
7896                                 i < len(mylist) - 1 and \
7897                                 depth >= mylist[i+1][1]:
7898                                         del mylist[i]
7899
7900                 from portage import flatten
7901                 from portage.dep import use_reduce, paren_reduce
7902                 # files to fetch list - avoids counting a same file twice
7903                 # in size display (verbose mode)
7904                 myfetchlist=[]
7905
7906                 # Use this set to detect when all the "repoadd" strings are "[0]"
7907                 # and disable the entire repo display in this case.
7908                 repoadd_set = set()
7909
7910                 for mylist_index in xrange(len(mylist)):
7911                         x, depth, ordered = mylist[mylist_index]
7912                         pkg_type = x[0]
7913                         myroot = x[1]
7914                         pkg_key = x[2]
7915                         portdb = self.trees[myroot]["porttree"].dbapi
7916                         bindb  = self.trees[myroot]["bintree"].dbapi
7917                         vardb = self.trees[myroot]["vartree"].dbapi
7918                         vartree = self.trees[myroot]["vartree"]
7919                         pkgsettings = self.pkgsettings[myroot]
7920
7921                         fetch=" "
7922                         indent = " " * depth
7923
7924                         if isinstance(x, Blocker):
7925                                 if x.satisfied:
7926                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7927                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7928                                 else:
7929                                         blocker_style = "PKG_BLOCKER"
7930                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7931                                 if ordered:
7932                                         counters.blocks += 1
7933                                         if x.satisfied:
7934                                                 counters.blocks_satisfied += 1
7935                                 resolved = portage.key_expand(
7936                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7937                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7938                                         addl += " " + colorize(blocker_style, resolved)
7939                                 else:
7940                                         addl = "[%s %s] %s%s" % \
7941                                                 (colorize(blocker_style, "blocks"),
7942                                                 addl, indent, colorize(blocker_style, resolved))
7943                                 block_parents = self._blocker_parents.parent_nodes(x)
7944                                 block_parents = set([pnode[2] for pnode in block_parents])
7945                                 block_parents = ", ".join(block_parents)
7946                                 if resolved!=x[2]:
7947                                         addl += colorize(blocker_style,
7948                                                 " (\"%s\" is blocking %s)") % \
7949                                                 (str(x.atom).lstrip("!"), block_parents)
7950                                 else:
7951                                         addl += colorize(blocker_style,
7952                                                 " (is blocking %s)") % block_parents
7953                                 if isinstance(x, Blocker) and x.satisfied:
7954                                         if columns:
7955                                                 continue
7956                                         p.append(addl)
7957                                 else:
7958                                         blockers.append(addl)
7959                         else:
7960                                 pkg_status = x[3]
7961                                 pkg_merge = ordered and pkg_status == "merge"
7962                                 if not pkg_merge and pkg_status == "merge":
7963                                         pkg_status = "nomerge"
7964                                 built = pkg_type != "ebuild"
7965                                 installed = pkg_type == "installed"
7966                                 pkg = x
7967                                 metadata = pkg.metadata
7968                                 ebuild_path = None
7969                                 repo_name = metadata["repository"]
7970                                 if pkg_type == "ebuild":
7971                                         ebuild_path = portdb.findname(pkg_key)
7972                                         if not ebuild_path: # shouldn't happen
7973                                                 raise portage.exception.PackageNotFound(pkg_key)
7974                                         repo_path_real = os.path.dirname(os.path.dirname(
7975                                                 os.path.dirname(ebuild_path)))
7976                                 else:
7977                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7978                                 pkg_use = list(pkg.use.enabled)
7979                                 try:
7980                                         restrict = flatten(use_reduce(paren_reduce(
7981                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7982                                 except portage.exception.InvalidDependString, e:
7983                                         if not pkg.installed:
7984                                                 show_invalid_depstring_notice(x,
7985                                                         pkg.metadata["RESTRICT"], str(e))
7986                                                 del e
7987                                                 return 1
7988                                         restrict = []
7989                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7990                                         "fetch" in restrict:
7991                                         fetch = red("F")
7992                                         if ordered:
7993                                                 counters.restrict_fetch += 1
7994                                         if portdb.fetch_check(pkg_key, pkg_use):
7995                                                 fetch = green("f")
7996                                                 if ordered:
7997                                                         counters.restrict_fetch_satisfied += 1
7998
7999                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8000                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8001                                 myoldbest = []
8002                                 myinslotlist = None
8003                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8004                                 if vardb.cpv_exists(pkg_key):
8005                                         addl="  "+yellow("R")+fetch+"  "
8006                                         if ordered:
8007                                                 if pkg_merge:
8008                                                         counters.reinst += 1
8009                                                 elif pkg_status == "uninstall":
8010                                                         counters.uninst += 1
8011                                 # filter out old-style virtual matches
8012                                 elif installed_versions and \
8013                                         portage.cpv_getkey(installed_versions[0]) == \
8014                                         portage.cpv_getkey(pkg_key):
8015                                         myinslotlist = vardb.match(pkg.slot_atom)
8016                                         # If this is the first install of a new-style virtual, we
8017                                         # need to filter out old-style virtual matches.
8018                                         if myinslotlist and \
8019                                                 portage.cpv_getkey(myinslotlist[0]) != \
8020                                                 portage.cpv_getkey(pkg_key):
8021                                                 myinslotlist = None
8022                                         if myinslotlist:
8023                                                 myoldbest = myinslotlist[:]
8024                                                 addl = "   " + fetch
8025                                                 if not portage.dep.cpvequal(pkg_key,
8026                                                         portage.best([pkg_key] + myoldbest)):
8027                                                         # Downgrade in slot
8028                                                         addl += turquoise("U")+blue("D")
8029                                                         if ordered:
8030                                                                 counters.downgrades += 1
8031                                                 else:
8032                                                         # Update in slot
8033                                                         addl += turquoise("U") + " "
8034                                                         if ordered:
8035                                                                 counters.upgrades += 1
8036                                         else:
8037                                                 # New slot, mark it new.
8038                                                 addl = " " + green("NS") + fetch + "  "
8039                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8040                                                 if ordered:
8041                                                         counters.newslot += 1
8042
8043                                         if "--changelog" in self.myopts:
8044                                                 inst_matches = vardb.match(pkg.slot_atom)
8045                                                 if inst_matches:
8046                                                         changelogs.extend(self.calc_changelog(
8047                                                                 portdb.findname(pkg_key),
8048                                                                 inst_matches[0], pkg_key))
8049                                 else:
8050                                         addl = " " + green("N") + " " + fetch + "  "
8051                                         if ordered:
8052                                                 counters.new += 1
8053
8054                                 verboseadd = ""
8055                                 repoadd = None
8056
8057                                 if True:
8058                                         # USE flag display
8059                                         forced_flags = set()
8060                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8061                                         forced_flags.update(pkgsettings.useforce)
8062                                         forced_flags.update(pkgsettings.usemask)
8063
8064                                         cur_use = [flag for flag in pkg.use.enabled \
8065                                                 if flag in pkg.iuse.all]
8066                                         cur_iuse = sorted(pkg.iuse.all)
8067
8068                                         if myoldbest and myinslotlist:
8069                                                 previous_cpv = myoldbest[0]
8070                                         else:
8071                                                 previous_cpv = pkg.cpv
8072                                         if vardb.cpv_exists(previous_cpv):
8073                                                 old_iuse, old_use = vardb.aux_get(
8074                                                                 previous_cpv, ["IUSE", "USE"])
8075                                                 old_iuse = list(set(
8076                                                         filter_iuse_defaults(old_iuse.split())))
8077                                                 old_iuse.sort()
8078                                                 old_use = old_use.split()
8079                                                 is_new = False
8080                                         else:
8081                                                 old_iuse = []
8082                                                 old_use = []
8083                                                 is_new = True
8084
8085                                         old_use = [flag for flag in old_use if flag in old_iuse]
8086
8087                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8088                                         use_expand.sort()
8089                                         use_expand.reverse()
8090                                         use_expand_hidden = \
8091                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8092
8093                                         def map_to_use_expand(myvals, forcedFlags=False,
8094                                                 removeHidden=True):
8095                                                 ret = {}
8096                                                 forced = {}
8097                                                 for exp in use_expand:
8098                                                         ret[exp] = []
8099                                                         forced[exp] = set()
8100                                                         for val in myvals[:]:
8101                                                                 if val.startswith(exp.lower()+"_"):
8102                                                                         if val in forced_flags:
8103                                                                                 forced[exp].add(val[len(exp)+1:])
8104                                                                         ret[exp].append(val[len(exp)+1:])
8105                                                                         myvals.remove(val)
8106                                                 ret["USE"] = myvals
8107                                                 forced["USE"] = [val for val in myvals \
8108                                                         if val in forced_flags]
8109                                                 if removeHidden:
8110                                                         for exp in use_expand_hidden:
8111                                                                 ret.pop(exp, None)
8112                                                 if forcedFlags:
8113                                                         return ret, forced
8114                                                 return ret
8115
8116                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8117                                         # are the only thing that triggered reinstallation.
8118                                         reinst_flags_map = {}
8119                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8120                                         reinst_expand_map = None
8121                                         if reinstall_for_flags:
8122                                                 reinst_flags_map = map_to_use_expand(
8123                                                         list(reinstall_for_flags), removeHidden=False)
8124                                                 for k in list(reinst_flags_map):
8125                                                         if not reinst_flags_map[k]:
8126                                                                 del reinst_flags_map[k]
8127                                                 if not reinst_flags_map.get("USE"):
8128                                                         reinst_expand_map = reinst_flags_map.copy()
8129                                                         reinst_expand_map.pop("USE", None)
8130                                         if reinst_expand_map and \
8131                                                 not set(reinst_expand_map).difference(
8132                                                 use_expand_hidden):
8133                                                 use_expand_hidden = \
8134                                                         set(use_expand_hidden).difference(
8135                                                         reinst_expand_map)
8136
8137                                         cur_iuse_map, iuse_forced = \
8138                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8139                                         cur_use_map = map_to_use_expand(cur_use)
8140                                         old_iuse_map = map_to_use_expand(old_iuse)
8141                                         old_use_map = map_to_use_expand(old_use)
8142
8143                                         use_expand.sort()
8144                                         use_expand.insert(0, "USE")
8145                                         
8146                                         for key in use_expand:
8147                                                 if key in use_expand_hidden:
8148                                                         continue
8149                                                 verboseadd += create_use_string(key.upper(),
8150                                                         cur_iuse_map[key], iuse_forced[key],
8151                                                         cur_use_map[key], old_iuse_map[key],
8152                                                         old_use_map[key], is_new,
8153                                                         reinst_flags_map.get(key))
8154
8155                                 if verbosity == 3:
8156                                         # size verbose
8157                                         mysize=0
8158                                         if pkg_type == "ebuild" and pkg_merge:
8159                                                 try:
8160                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8161                                                                 useflags=pkg_use, debug=self.edebug)
8162                                                 except portage.exception.InvalidDependString, e:
8163                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8164                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8165                                                         del e
8166                                                         return 1
8167                                                 if myfilesdict is None:
8168                                                         myfilesdict="[empty/missing/bad digest]"
8169                                                 else:
8170                                                         for myfetchfile in myfilesdict:
8171                                                                 if myfetchfile not in myfetchlist:
8172                                                                         mysize+=myfilesdict[myfetchfile]
8173                                                                         myfetchlist.append(myfetchfile)
8174                                                         if ordered:
8175                                                                 counters.totalsize += mysize
8176                                                 verboseadd += format_size(mysize)
8177
8178                                         # overlay verbose
8179                                         # assign index for a previous version in the same slot
8180                                         has_previous = False
8181                                         repo_name_prev = None
8182                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8183                                                 metadata["SLOT"])
8184                                         slot_matches = vardb.match(slot_atom)
8185                                         if slot_matches:
8186                                                 has_previous = True
8187                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8188                                                         ["repository"])[0]
8189
8190                                         # now use the data to generate output
8191                                         if pkg.installed or not has_previous:
8192                                                 repoadd = repo_display.repoStr(repo_path_real)
8193                                         else:
8194                                                 repo_path_prev = None
8195                                                 if repo_name_prev:
8196                                                         repo_path_prev = portdb.getRepositoryPath(
8197                                                                 repo_name_prev)
8198                                                 if repo_path_prev == repo_path_real:
8199                                                         repoadd = repo_display.repoStr(repo_path_real)
8200                                                 else:
8201                                                         repoadd = "%s=>%s" % (
8202                                                                 repo_display.repoStr(repo_path_prev),
8203                                                                 repo_display.repoStr(repo_path_real))
8204                                         if repoadd:
8205                                                 repoadd_set.add(repoadd)
8206
8207                                 xs = [portage.cpv_getkey(pkg_key)] + \
8208                                         list(portage.catpkgsplit(pkg_key)[2:])
8209                                 if xs[2] == "r0":
8210                                         xs[2] = ""
8211                                 else:
8212                                         xs[2] = "-" + xs[2]
8213
8214                                 mywidth = 130
8215                                 if "COLUMNWIDTH" in self.settings:
8216                                         try:
8217                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8218                                         except ValueError, e:
8219                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8220                                                 portage.writemsg(
8221                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8222                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8223                                                 del e
8224                                 oldlp = mywidth - 30
8225                                 newlp = oldlp - 30
8226
8227                                 # Convert myoldbest from a list to a string.
8228                                 if not myoldbest:
8229                                         myoldbest = ""
8230                                 else:
8231                                         for pos, key in enumerate(myoldbest):
8232                                                 key = portage.catpkgsplit(key)[2] + \
8233                                                         "-" + portage.catpkgsplit(key)[3]
8234                                                 if key[-3:] == "-r0":
8235                                                         key = key[:-3]
8236                                                 myoldbest[pos] = key
8237                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8238
8239                                 pkg_cp = xs[0]
8240                                 root_config = self.roots[myroot]
8241                                 system_set = root_config.sets["system"]
8242                                 world_set  = root_config.sets["world"]
8243
8244                                 pkg_system = False
8245                                 pkg_world = False
8246                                 try:
8247                                         pkg_system = system_set.findAtomForPackage(pkg)
8248                                         pkg_world  = world_set.findAtomForPackage(pkg)
8249                                         if not (oneshot or pkg_world) and \
8250                                                 myroot == self.target_root and \
8251                                                 favorites_set.findAtomForPackage(pkg):
8252                                                 # Maybe it will be added to world now.
8253                                                 if create_world_atom(pkg, favorites_set, root_config):
8254                                                         pkg_world = True
8255                                 except portage.exception.InvalidDependString:
8256                                         # This is reported elsewhere if relevant.
8257                                         pass
8258
8259                                 def pkgprint(pkg_str):
8260                                         if pkg_merge:
8261                                                 if pkg_system:
8262                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8263                                                 elif pkg_world:
8264                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8265                                                 else:
8266                                                         return colorize("PKG_MERGE", pkg_str)
8267                                         elif pkg_status == "uninstall":
8268                                                 return colorize("PKG_UNINSTALL", pkg_str)
8269                                         else:
8270                                                 if pkg_system:
8271                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8272                                                 elif pkg_world:
8273                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8274                                                 else:
8275                                                         return colorize("PKG_NOMERGE", pkg_str)
8276
8277                                 try:
8278                                         properties = flatten(use_reduce(paren_reduce(
8279                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8280                                 except portage.exception.InvalidDependString, e:
8281                                         if not pkg.installed:
8282                                                 show_invalid_depstring_notice(pkg,
8283                                                         pkg.metadata["PROPERTIES"], str(e))
8284                                                 del e
8285                                                 return 1
8286                                         properties = []
8287                                 interactive = "interactive" in properties
8288                                 if interactive and pkg.operation == "merge":
8289                                         addl = colorize("WARN", "I") + addl[1:]
8290                                         if ordered:
8291                                                 counters.interactive += 1
8292
8293                                 if x[1]!="/":
8294                                         if myoldbest:
8295                                                 myoldbest +=" "
8296                                         if "--columns" in self.myopts:
8297                                                 if "--quiet" in self.myopts:
8298                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8299                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8300                                                         myprint=myprint+myoldbest
8301                                                         myprint=myprint+darkgreen("to "+x[1])
8302                                                         verboseadd = None
8303                                                 else:
8304                                                         if not pkg_merge:
8305                                                                 myprint = "[%s] %s%s" % \
8306                                                                         (pkgprint(pkg_status.ljust(13)),
8307                                                                         indent, pkgprint(pkg.cp))
8308                                                         else:
8309                                                                 myprint = "[%s %s] %s%s" % \
8310                                                                         (pkgprint(pkg.type_name), addl,
8311                                                                         indent, pkgprint(pkg.cp))
8312                                                         if (newlp-nc_len(myprint)) > 0:
8313                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8314                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8315                                                         if (oldlp-nc_len(myprint)) > 0:
8316                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8317                                                         myprint=myprint+myoldbest
8318                                                         myprint += darkgreen("to " + pkg.root)
8319                                         else:
8320                                                 if not pkg_merge:
8321                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8322                                                 else:
8323                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8324                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8325                                                         myoldbest + darkgreen("to " + myroot)
8326                                 else:
8327                                         if "--columns" in self.myopts:
8328                                                 if "--quiet" in self.myopts:
8329                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8330                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8331                                                         myprint=myprint+myoldbest
8332                                                         verboseadd = None
8333                                                 else:
8334                                                         if not pkg_merge:
8335                                                                 myprint = "[%s] %s%s" % \
8336                                                                         (pkgprint(pkg_status.ljust(13)),
8337                                                                         indent, pkgprint(pkg.cp))
8338                                                         else:
8339                                                                 myprint = "[%s %s] %s%s" % \
8340                                                                         (pkgprint(pkg.type_name), addl,
8341                                                                         indent, pkgprint(pkg.cp))
8342                                                         if (newlp-nc_len(myprint)) > 0:
8343                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8344                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8345                                                         if (oldlp-nc_len(myprint)) > 0:
8346                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8347                                                         myprint += myoldbest
8348                                         else:
8349                                                 if not pkg_merge:
8350                                                         myprint = "[%s] %s%s %s" % \
8351                                                                 (pkgprint(pkg_status.ljust(13)),
8352                                                                 indent, pkgprint(pkg.cpv),
8353                                                                 myoldbest)
8354                                                 else:
8355                                                         myprint = "[%s %s] %s%s %s" % \
8356                                                                 (pkgprint(pkg_type), addl, indent,
8357                                                                 pkgprint(pkg.cpv), myoldbest)
8358
8359                                 if columns and pkg.operation == "uninstall":
8360                                         continue
8361                                 p.append((myprint, verboseadd, repoadd))
8362
8363                                 if "--tree" not in self.myopts and \
8364                                         "--quiet" not in self.myopts and \
8365                                         not self._opts_no_restart.intersection(self.myopts) and \
8366                                         pkg.root == self._running_root.root and \
8367                                         portage.match_from_list(
8368                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8369                                         not vardb.cpv_exists(pkg.cpv) and \
8370                                         "--quiet" not in self.myopts:
8371                                                 if mylist_index < len(mylist) - 1:
8372                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8373                                                         p.append(colorize("WARN", "    then resume the merge."))
8374
8375                 out = sys.stdout
8376                 show_repos = repoadd_set and repoadd_set != set(["0"])
8377
8378                 for x in p:
8379                         if isinstance(x, basestring):
8380                                 out.write("%s\n" % (x,))
8381                                 continue
8382
8383                         myprint, verboseadd, repoadd = x
8384
8385                         if verboseadd:
8386                                 myprint += " " + verboseadd
8387
8388                         if show_repos and repoadd:
8389                                 myprint += " " + teal("[%s]" % repoadd)
8390
8391                         out.write("%s\n" % (myprint,))
8392
8393                 for x in blockers:
8394                         print x
8395
8396                 if verbosity == 3:
8397                         print
8398                         print counters
8399                         if show_repos:
8400                                 sys.stdout.write(str(repo_display))
8401
8402                 if "--changelog" in self.myopts:
8403                         print
8404                         for revision,text in changelogs:
8405                                 print bold('*'+revision)
8406                                 sys.stdout.write(text)
8407
8408                 sys.stdout.flush()
8409                 return os.EX_OK
8410
8411         def display_problems(self):
8412                 """
8413                 Display problems with the dependency graph such as slot collisions.
8414                 This is called internally by display() to show the problems _after_
8415                 the merge list where it is most likely to be seen, but if display()
8416                 is not going to be called then this method should be called explicitly
8417                 to ensure that the user is notified of problems with the graph.
8418
8419                 All output goes to stderr, except for unsatisfied dependencies which
8420                 go to stdout for parsing by programs such as autounmask.
8421                 """
8422
8423                 # Note that show_masked_packages() sends it's output to
8424                 # stdout, and some programs such as autounmask parse the
8425                 # output in cases when emerge bails out. However, when
8426                 # show_masked_packages() is called for installed packages
8427                 # here, the message is a warning that is more appropriate
8428                 # to send to stderr, so temporarily redirect stdout to
8429                 # stderr. TODO: Fix output code so there's a cleaner way
8430                 # to redirect everything to stderr.
8431                 sys.stdout.flush()
8432                 sys.stderr.flush()
8433                 stdout = sys.stdout
8434                 try:
8435                         sys.stdout = sys.stderr
8436                         self._display_problems()
8437                 finally:
8438                         sys.stdout = stdout
8439                         sys.stdout.flush()
8440                         sys.stderr.flush()
8441
8442                 # This goes to stdout for parsing by programs like autounmask.
8443                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8444                         self._show_unsatisfied_dep(*pargs, **kwargs)
8445
8446         def _display_problems(self):
8447                 if self._circular_deps_for_display is not None:
8448                         self._show_circular_deps(
8449                                 self._circular_deps_for_display)
8450
8451                 # The user is only notified of a slot conflict if
8452                 # there are no unresolvable blocker conflicts.
8453                 if self._unsatisfied_blockers_for_display is not None:
8454                         self._show_unsatisfied_blockers(
8455                                 self._unsatisfied_blockers_for_display)
8456                 else:
8457                         self._show_slot_collision_notice()
8458
8459                 # TODO: Add generic support for "set problem" handlers so that
8460                 # the below warnings aren't special cases for world only.
8461
8462                 if self._missing_args:
8463                         world_problems = False
8464                         if "world" in self._sets:
8465                                 # Filter out indirect members of world (from nested sets)
8466                                 # since only direct members of world are desired here.
8467                                 world_set = self.roots[self.target_root].sets["world"]
8468                                 for arg, atom in self._missing_args:
8469                                         if arg.name == "world" and atom in world_set:
8470                                                 world_problems = True
8471                                                 break
8472
8473                         if world_problems:
8474                                 sys.stderr.write("\n!!! Problems have been " + \
8475                                         "detected with your world file\n")
8476                                 sys.stderr.write("!!! Please run " + \
8477                                         green("emaint --check world")+"\n\n")
8478
8479                 if self._missing_args:
8480                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8481                                 " Ebuilds for the following packages are either all\n")
8482                         sys.stderr.write(colorize("BAD", "!!!") + \
8483                                 " masked or don't exist:\n")
8484                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8485                                 self._missing_args) + "\n")
8486
8487                 if self._pprovided_args:
8488                         arg_refs = {}
8489                         for arg, atom in self._pprovided_args:
8490                                 if isinstance(arg, SetArg):
8491                                         parent = arg.name
8492                                         arg_atom = (atom, atom)
8493                                 else:
8494                                         parent = "args"
8495                                         arg_atom = (arg.arg, atom)
8496                                 refs = arg_refs.setdefault(arg_atom, [])
8497                                 if parent not in refs:
8498                                         refs.append(parent)
8499                         msg = []
8500                         msg.append(bad("\nWARNING: "))
8501                         if len(self._pprovided_args) > 1:
8502                                 msg.append("Requested packages will not be " + \
8503                                         "merged because they are listed in\n")
8504                         else:
8505                                 msg.append("A requested package will not be " + \
8506                                         "merged because it is listed in\n")
8507                         msg.append("package.provided:\n\n")
8508                         problems_sets = set()
8509                         for (arg, atom), refs in arg_refs.iteritems():
8510                                 ref_string = ""
8511                                 if refs:
8512                                         problems_sets.update(refs)
8513                                         refs.sort()
8514                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8515                                         ref_string = " pulled in by " + ref_string
8516                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8517                         msg.append("\n")
8518                         if "world" in problems_sets:
8519                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8520                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8521                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8522                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8523                                 msg.append("The best course of action depends on the reason that an offending\n")
8524                                 msg.append("package.provided entry exists.\n\n")
8525                         sys.stderr.write("".join(msg))
8526
8527                 masked_packages = []
8528                 for pkg in self._masked_installed:
8529                         root_config = pkg.root_config
8530                         pkgsettings = self.pkgsettings[pkg.root]
8531                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8532                         masked_packages.append((root_config, pkgsettings,
8533                                 pkg.cpv, pkg.metadata, mreasons))
8534                 if masked_packages:
8535                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8536                                 " The following installed packages are masked:\n")
8537                         show_masked_packages(masked_packages)
8538                         show_mask_docs()
8539                         print
8540
8541         def calc_changelog(self,ebuildpath,current,next):
8542                 if ebuildpath == None or not os.path.exists(ebuildpath):
8543                         return []
8544                 current = '-'.join(portage.catpkgsplit(current)[1:])
8545                 if current.endswith('-r0'):
8546                         current = current[:-3]
8547                 next = '-'.join(portage.catpkgsplit(next)[1:])
8548                 if next.endswith('-r0'):
8549                         next = next[:-3]
8550                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8551                 try:
8552                         changelog = open(changelogpath).read()
8553                 except SystemExit, e:
8554                         raise # Needed else can't exit
8555                 except:
8556                         return []
8557                 divisions = self.find_changelog_tags(changelog)
8558                 #print 'XX from',current,'to',next
8559                 #for div,text in divisions: print 'XX',div
8560                 # skip entries for all revisions above the one we are about to emerge
8561                 for i in range(len(divisions)):
8562                         if divisions[i][0]==next:
8563                                 divisions = divisions[i:]
8564                                 break
8565                 # find out how many entries we are going to display
8566                 for i in range(len(divisions)):
8567                         if divisions[i][0]==current:
8568                                 divisions = divisions[:i]
8569                                 break
8570                 else:
8571                     # couldnt find the current revision in the list. display nothing
8572                         return []
8573                 return divisions
8574
8575         def find_changelog_tags(self,changelog):
8576                 divs = []
8577                 release = None
8578                 while 1:
8579                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8580                         if match is None:
8581                                 if release is not None:
8582                                         divs.append((release,changelog))
8583                                 return divs
8584                         if release is not None:
8585                                 divs.append((release,changelog[:match.start()]))
8586                         changelog = changelog[match.end():]
8587                         release = match.group(1)
8588                         if release.endswith('.ebuild'):
8589                                 release = release[:-7]
8590                         if release.endswith('-r0'):
8591                                 release = release[:-3]
8592
8593         def saveNomergeFavorites(self):
8594                 """Find atoms in favorites that are not in the mergelist and add them
8595                 to the world file if necessary."""
8596                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8597                         "--oneshot", "--onlydeps", "--pretend"):
8598                         if x in self.myopts:
8599                                 return
8600                 root_config = self.roots[self.target_root]
8601                 world_set = root_config.sets["world"]
8602
8603                 world_locked = False
8604                 if hasattr(world_set, "lock"):
8605                         world_set.lock()
8606                         world_locked = True
8607
8608                 if hasattr(world_set, "load"):
8609                         world_set.load() # maybe it's changed on disk
8610
8611                 args_set = self._sets["args"]
8612                 portdb = self.trees[self.target_root]["porttree"].dbapi
8613                 added_favorites = set()
8614                 for x in self._set_nodes:
8615                         pkg_type, root, pkg_key, pkg_status = x
8616                         if pkg_status != "nomerge":
8617                                 continue
8618
8619                         try:
8620                                 myfavkey = create_world_atom(x, args_set, root_config)
8621                                 if myfavkey:
8622                                         if myfavkey in added_favorites:
8623                                                 continue
8624                                         added_favorites.add(myfavkey)
8625                         except portage.exception.InvalidDependString, e:
8626                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8627                                         (pkg_key, str(e)), noiselevel=-1)
8628                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8629                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8630                                 del e
8631                 all_added = []
8632                 for k in self._sets:
8633                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8634                                 continue
8635                         s = SETPREFIX + k
8636                         if s in world_set:
8637                                 continue
8638                         all_added.append(SETPREFIX + k)
8639                 all_added.extend(added_favorites)
8640                 all_added.sort()
8641                 for a in all_added:
8642                         print ">>> Recording %s in \"world\" favorites file..." % \
8643                                 colorize("INFORM", str(a))
8644                 if all_added:
8645                         world_set.update(all_added)
8646
8647                 if world_locked:
8648                         world_set.unlock()
8649
8650         def loadResumeCommand(self, resume_data, skip_masked=False):
8651                 """
8652                 Add a resume command to the graph and validate it in the process.  This
8653                 will raise a PackageNotFound exception if a package is not available.
8654                 """
8655
8656                 if not isinstance(resume_data, dict):
8657                         return False
8658
8659                 mergelist = resume_data.get("mergelist")
8660                 if not isinstance(mergelist, list):
8661                         mergelist = []
8662
8663                 fakedb = self.mydbapi
8664                 trees = self.trees
8665                 serialized_tasks = []
8666                 masked_tasks = []
8667                 for x in mergelist:
8668                         if not (isinstance(x, list) and len(x) == 4):
8669                                 continue
8670                         pkg_type, myroot, pkg_key, action = x
8671                         if pkg_type not in self.pkg_tree_map:
8672                                 continue
8673                         if action != "merge":
8674                                 continue
8675                         tree_type = self.pkg_tree_map[pkg_type]
8676                         mydb = trees[myroot][tree_type].dbapi
8677                         db_keys = list(self._trees_orig[myroot][
8678                                 tree_type].dbapi._aux_cache_keys)
8679                         try:
8680                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8681                         except KeyError:
8682                                 # It does no exist or it is corrupt.
8683                                 if action == "uninstall":
8684                                         continue
8685                                 raise portage.exception.PackageNotFound(pkg_key)
8686                         installed = action == "uninstall"
8687                         built = pkg_type != "ebuild"
8688                         root_config = self.roots[myroot]
8689                         pkg = Package(built=built, cpv=pkg_key,
8690                                 installed=installed, metadata=metadata,
8691                                 operation=action, root_config=root_config,
8692                                 type_name=pkg_type)
8693                         if pkg_type == "ebuild":
8694                                 pkgsettings = self.pkgsettings[myroot]
8695                                 pkgsettings.setcpv(pkg)
8696                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8697                         self._pkg_cache[pkg] = pkg
8698
8699                         root_config = self.roots[pkg.root]
8700                         if "merge" == pkg.operation and \
8701                                 not visible(root_config.settings, pkg):
8702                                 if skip_masked:
8703                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8704                                 else:
8705                                         self._unsatisfied_deps_for_display.append(
8706                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8707
8708                         fakedb[myroot].cpv_inject(pkg)
8709                         serialized_tasks.append(pkg)
8710                         self.spinner.update()
8711
8712                 if self._unsatisfied_deps_for_display:
8713                         return False
8714
8715                 if not serialized_tasks or "--nodeps" in self.myopts:
8716                         self._serialized_tasks_cache = serialized_tasks
8717                         self._scheduler_graph = self.digraph
8718                 else:
8719                         self._select_package = self._select_pkg_from_graph
8720                         self.myparams.add("selective")
8721                         # Always traverse deep dependencies in order to account for
8722                         # potentially unsatisfied dependencies of installed packages.
8723                         # This is necessary for correct --keep-going or --resume operation
8724                         # in case a package from a group of circularly dependent packages
8725                         # fails. In this case, a package which has recently been installed
8726                         # may have an unsatisfied circular dependency (pulled in by
8727                         # PDEPEND, for example). So, even though a package is already
8728                         # installed, it may not have all of it's dependencies satisfied, so
8729                         # it may not be usable. If such a package is in the subgraph of
8730                         # deep depenedencies of a scheduled build, that build needs to
8731                         # be cancelled. In order for this type of situation to be
8732                         # recognized, deep traversal of dependencies is required.
8733                         self.myparams.add("deep")
8734
8735                         favorites = resume_data.get("favorites")
8736                         args_set = self._sets["args"]
8737                         if isinstance(favorites, list):
8738                                 args = self._load_favorites(favorites)
8739                         else:
8740                                 args = []
8741
8742                         for task in serialized_tasks:
8743                                 if isinstance(task, Package) and \
8744                                         task.operation == "merge":
8745                                         if not self._add_pkg(task, None):
8746                                                 return False
8747
8748                         # Packages for argument atoms need to be explicitly
8749                         # added via _add_pkg() so that they are included in the
8750                         # digraph (needed at least for --tree display).
8751                         for arg in args:
8752                                 for atom in arg.set:
8753                                         pkg, existing_node = self._select_package(
8754                                                 arg.root_config.root, atom)
8755                                         if existing_node is None and \
8756                                                 pkg is not None:
8757                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8758                                                         root=pkg.root, parent=arg)):
8759                                                         return False
8760
8761                         # Allow unsatisfied deps here to avoid showing a masking
8762                         # message for an unsatisfied dep that isn't necessarily
8763                         # masked.
8764                         if not self._create_graph(allow_unsatisfied=True):
8765                                 return False
8766
8767                         unsatisfied_deps = []
8768                         for dep in self._unsatisfied_deps:
8769                                 if not isinstance(dep.parent, Package):
8770                                         continue
8771                                 if dep.parent.operation == "merge":
8772                                         unsatisfied_deps.append(dep)
8773                                         continue
8774
8775                                 # For unsatisfied deps of installed packages, only account for
8776                                 # them if they are in the subgraph of dependencies of a package
8777                                 # which is scheduled to be installed.
8778                                 unsatisfied_install = False
8779                                 traversed = set()
8780                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8781                                 while dep_stack:
8782                                         node = dep_stack.pop()
8783                                         if not isinstance(node, Package):
8784                                                 continue
8785                                         if node.operation == "merge":
8786                                                 unsatisfied_install = True
8787                                                 break
8788                                         if node in traversed:
8789                                                 continue
8790                                         traversed.add(node)
8791                                         dep_stack.extend(self.digraph.parent_nodes(node))
8792
8793                                 if unsatisfied_install:
8794                                         unsatisfied_deps.append(dep)
8795
8796                         if masked_tasks or unsatisfied_deps:
8797                                 # This probably means that a required package
8798                                 # was dropped via --skipfirst. It makes the
8799                                 # resume list invalid, so convert it to a
8800                                 # UnsatisfiedResumeDep exception.
8801                                 raise self.UnsatisfiedResumeDep(self,
8802                                         masked_tasks + unsatisfied_deps)
8803                         self._serialized_tasks_cache = None
8804                         try:
8805                                 self.altlist()
8806                         except self._unknown_internal_error:
8807                                 return False
8808
8809                 return True
8810
8811         def _load_favorites(self, favorites):
8812                 """
8813                 Use a list of favorites to resume state from a
8814                 previous select_files() call. This creates similar
8815                 DependencyArg instances to those that would have
8816                 been created by the original select_files() call.
8817                 This allows Package instances to be matched with
8818                 DependencyArg instances during graph creation.
8819                 """
8820                 root_config = self.roots[self.target_root]
8821                 getSetAtoms = root_config.setconfig.getSetAtoms
8822                 sets = root_config.sets
8823                 args = []
8824                 for x in favorites:
8825                         if not isinstance(x, basestring):
8826                                 continue
8827                         if x in ("system", "world"):
8828                                 x = SETPREFIX + x
8829                         if x.startswith(SETPREFIX):
8830                                 s = x[len(SETPREFIX):]
8831                                 if s not in sets:
8832                                         continue
8833                                 if s in self._sets:
8834                                         continue
8835                                 # Recursively expand sets so that containment tests in
8836                                 # self._get_parent_sets() properly match atoms in nested
8837                                 # sets (like if world contains system).
8838                                 expanded_set = InternalPackageSet(
8839                                         initial_atoms=getSetAtoms(s))
8840                                 self._sets[s] = expanded_set
8841                                 args.append(SetArg(arg=x, set=expanded_set,
8842                                         root_config=root_config))
8843                         else:
8844                                 if not portage.isvalidatom(x):
8845                                         continue
8846                                 args.append(AtomArg(arg=x, atom=x,
8847                                         root_config=root_config))
8848
8849                 self._set_args(args)
8850                 return args
8851
8852         class UnsatisfiedResumeDep(portage.exception.PortageException):
8853                 """
8854                 A dependency of a resume list is not installed. This
8855                 can occur when a required package is dropped from the
8856                 merge list via --skipfirst.
8857                 """
8858                 def __init__(self, depgraph, value):
8859                         portage.exception.PortageException.__init__(self, value)
8860                         self.depgraph = depgraph
8861
8862         class _internal_exception(portage.exception.PortageException):
8863                 def __init__(self, value=""):
8864                         portage.exception.PortageException.__init__(self, value)
8865
8866         class _unknown_internal_error(_internal_exception):
8867                 """
8868                 Used by the depgraph internally to terminate graph creation.
8869                 The specific reason for the failure should have been dumped
8870                 to stderr, unfortunately, the exact reason for the failure
8871                 may not be known.
8872                 """
8873
8874         class _serialize_tasks_retry(_internal_exception):
8875                 """
8876                 This is raised by the _serialize_tasks() method when it needs to
8877                 be called again for some reason. The only case that it's currently
8878                 used for is when neglected dependencies need to be added to the
8879                 graph in order to avoid making a potentially unsafe decision.
8880                 """
8881
8882         class _dep_check_composite_db(portage.dbapi):
8883                 """
8884                 A dbapi-like interface that is optimized for use in dep_check() calls.
8885                 This is built on top of the existing depgraph package selection logic.
8886                 Some packages that have been added to the graph may be masked from this
8887                 view in order to influence the atom preference selection that occurs
8888                 via dep_check().
8889                 """
8890                 def __init__(self, depgraph, root):
8891                         portage.dbapi.__init__(self)
8892                         self._depgraph = depgraph
8893                         self._root = root
8894                         self._match_cache = {}
8895                         self._cpv_pkg_map = {}
8896
8897                 def _clear_cache(self):
8898                         self._match_cache.clear()
8899                         self._cpv_pkg_map.clear()
8900
8901                 def match(self, atom):
8902                         ret = self._match_cache.get(atom)
8903                         if ret is not None:
8904                                 return ret[:]
8905                         orig_atom = atom
8906                         if "/" not in atom:
8907                                 atom = self._dep_expand(atom)
8908                         pkg, existing = self._depgraph._select_package(self._root, atom)
8909                         if not pkg:
8910                                 ret = []
8911                         else:
8912                                 # Return the highest available from select_package() as well as
8913                                 # any matching slots in the graph db.
8914                                 slots = set()
8915                                 slots.add(pkg.metadata["SLOT"])
8916                                 atom_cp = portage.dep_getkey(atom)
8917                                 if pkg.cp.startswith("virtual/"):
8918                                         # For new-style virtual lookahead that occurs inside
8919                                         # dep_check(), examine all slots. This is needed
8920                                         # so that newer slots will not unnecessarily be pulled in
8921                                         # when a satisfying lower slot is already installed. For
8922                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8923                                         # there's no need to pull in a newer slot to satisfy a
8924                                         # virtual/jdk dependency.
8925                                         for db, pkg_type, built, installed, db_keys in \
8926                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8927                                                 for cpv in db.match(atom):
8928                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8929                                                                 continue
8930                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8931                                 ret = []
8932                                 if self._visible(pkg):
8933                                         self._cpv_pkg_map[pkg.cpv] = pkg
8934                                         ret.append(pkg.cpv)
8935                                 slots.remove(pkg.metadata["SLOT"])
8936                                 while slots:
8937                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8938                                         pkg, existing = self._depgraph._select_package(
8939                                                 self._root, slot_atom)
8940                                         if not pkg:
8941                                                 continue
8942                                         if not self._visible(pkg):
8943                                                 continue
8944                                         self._cpv_pkg_map[pkg.cpv] = pkg
8945                                         ret.append(pkg.cpv)
8946                                 if ret:
8947                                         self._cpv_sort_ascending(ret)
8948                         self._match_cache[orig_atom] = ret
8949                         return ret[:]
8950
8951                 def _visible(self, pkg):
8952                         if pkg.installed and "selective" not in self._depgraph.myparams:
8953                                 try:
8954                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8955                                 except (StopIteration, portage.exception.InvalidDependString):
8956                                         arg = None
8957                                 if arg:
8958                                         return False
8959                         if pkg.installed:
8960                                 try:
8961                                         if not visible(
8962                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8963                                                 return False
8964                                 except portage.exception.InvalidDependString:
8965                                         pass
8966                         in_graph = self._depgraph._slot_pkg_map[
8967                                 self._root].get(pkg.slot_atom)
8968                         if in_graph is None:
8969                                 # Mask choices for packages which are not the highest visible
8970                                 # version within their slot (since they usually trigger slot
8971                                 # conflicts).
8972                                 highest_visible, in_graph = self._depgraph._select_package(
8973                                         self._root, pkg.slot_atom)
8974                                 if pkg != highest_visible:
8975                                         return False
8976                         elif in_graph != pkg:
8977                                 # Mask choices for packages that would trigger a slot
8978                                 # conflict with a previously selected package.
8979                                 return False
8980                         return True
8981
8982                 def _dep_expand(self, atom):
8983                         """
8984                         This is only needed for old installed packages that may
8985                         contain atoms that are not fully qualified with a specific
8986                         category. Emulate the cpv_expand() function that's used by
8987                         dbapi.match() in cases like this. If there are multiple
8988                         matches, it's often due to a new-style virtual that has
8989                         been added, so try to filter those out to avoid raising
8990                         a ValueError.
8991                         """
8992                         root_config = self._depgraph.roots[self._root]
8993                         orig_atom = atom
8994                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8995                         if len(expanded_atoms) > 1:
8996                                 non_virtual_atoms = []
8997                                 for x in expanded_atoms:
8998                                         if not portage.dep_getkey(x).startswith("virtual/"):
8999                                                 non_virtual_atoms.append(x)
9000                                 if len(non_virtual_atoms) == 1:
9001                                         expanded_atoms = non_virtual_atoms
9002                         if len(expanded_atoms) > 1:
9003                                 # compatible with portage.cpv_expand()
9004                                 raise portage.exception.AmbiguousPackageName(
9005                                         [portage.dep_getkey(x) for x in expanded_atoms])
9006                         if expanded_atoms:
9007                                 atom = expanded_atoms[0]
9008                         else:
9009                                 null_atom = insert_category_into_atom(atom, "null")
9010                                 null_cp = portage.dep_getkey(null_atom)
9011                                 cat, atom_pn = portage.catsplit(null_cp)
9012                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9013                                 if virts_p:
9014                                         # Allow the resolver to choose which virtual.
9015                                         atom = insert_category_into_atom(atom, "virtual")
9016                                 else:
9017                                         atom = insert_category_into_atom(atom, "null")
9018                         return atom
9019
9020                 def aux_get(self, cpv, wants):
9021                         metadata = self._cpv_pkg_map[cpv].metadata
9022                         return [metadata.get(x, "") for x in wants]
9023
9024 class RepoDisplay(object):
9025         def __init__(self, roots):
9026                 self._shown_repos = {}
9027                 self._unknown_repo = False
9028                 repo_paths = set()
9029                 for root_config in roots.itervalues():
9030                         portdir = root_config.settings.get("PORTDIR")
9031                         if portdir:
9032                                 repo_paths.add(portdir)
9033                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9034                         if overlays:
9035                                 repo_paths.update(overlays.split())
9036                 repo_paths = list(repo_paths)
9037                 self._repo_paths = repo_paths
9038                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9039                         for repo_path in repo_paths ]
9040
9041                 # pre-allocate index for PORTDIR so that it always has index 0.
9042                 for root_config in roots.itervalues():
9043                         portdb = root_config.trees["porttree"].dbapi
9044                         portdir = portdb.porttree_root
9045                         if portdir:
9046                                 self.repoStr(portdir)
9047
9048         def repoStr(self, repo_path_real):
9049                 real_index = -1
9050                 if repo_path_real:
9051                         real_index = self._repo_paths_real.index(repo_path_real)
9052                 if real_index == -1:
9053                         s = "?"
9054                         self._unknown_repo = True
9055                 else:
9056                         shown_repos = self._shown_repos
9057                         repo_paths = self._repo_paths
9058                         repo_path = repo_paths[real_index]
9059                         index = shown_repos.get(repo_path)
9060                         if index is None:
9061                                 index = len(shown_repos)
9062                                 shown_repos[repo_path] = index
9063                         s = str(index)
9064                 return s
9065
9066         def __str__(self):
9067                 output = []
9068                 shown_repos = self._shown_repos
9069                 unknown_repo = self._unknown_repo
9070                 if shown_repos or self._unknown_repo:
9071                         output.append("Portage tree and overlays:\n")
9072                 show_repo_paths = list(shown_repos)
9073                 for repo_path, repo_index in shown_repos.iteritems():
9074                         show_repo_paths[repo_index] = repo_path
9075                 if show_repo_paths:
9076                         for index, repo_path in enumerate(show_repo_paths):
9077                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9078                 if unknown_repo:
9079                         output.append(" "+teal("[?]") + \
9080                                 " indicates that the source repository could not be determined\n")
9081                 return "".join(output)
9082
9083 class PackageCounters(object):
9084
9085         def __init__(self):
9086                 self.upgrades   = 0
9087                 self.downgrades = 0
9088                 self.new        = 0
9089                 self.newslot    = 0
9090                 self.reinst     = 0
9091                 self.uninst     = 0
9092                 self.blocks     = 0
9093                 self.blocks_satisfied         = 0
9094                 self.totalsize  = 0
9095                 self.restrict_fetch           = 0
9096                 self.restrict_fetch_satisfied = 0
9097                 self.interactive              = 0
9098
9099         def __str__(self):
9100                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9101                 myoutput = []
9102                 details = []
9103                 myoutput.append("Total: %s package" % total_installs)
9104                 if total_installs != 1:
9105                         myoutput.append("s")
9106                 if total_installs != 0:
9107                         myoutput.append(" (")
9108                 if self.upgrades > 0:
9109                         details.append("%s upgrade" % self.upgrades)
9110                         if self.upgrades > 1:
9111                                 details[-1] += "s"
9112                 if self.downgrades > 0:
9113                         details.append("%s downgrade" % self.downgrades)
9114                         if self.downgrades > 1:
9115                                 details[-1] += "s"
9116                 if self.new > 0:
9117                         details.append("%s new" % self.new)
9118                 if self.newslot > 0:
9119                         details.append("%s in new slot" % self.newslot)
9120                         if self.newslot > 1:
9121                                 details[-1] += "s"
9122                 if self.reinst > 0:
9123                         details.append("%s reinstall" % self.reinst)
9124                         if self.reinst > 1:
9125                                 details[-1] += "s"
9126                 if self.uninst > 0:
9127                         details.append("%s uninstall" % self.uninst)
9128                         if self.uninst > 1:
9129                                 details[-1] += "s"
9130                 if self.interactive > 0:
9131                         details.append("%s %s" % (self.interactive,
9132                                 colorize("WARN", "interactive")))
9133                 myoutput.append(", ".join(details))
9134                 if total_installs != 0:
9135                         myoutput.append(")")
9136                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9137                 if self.restrict_fetch:
9138                         myoutput.append("\nFetch Restriction: %s package" % \
9139                                 self.restrict_fetch)
9140                         if self.restrict_fetch > 1:
9141                                 myoutput.append("s")
9142                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9143                         myoutput.append(bad(" (%s unsatisfied)") % \
9144                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9145                 if self.blocks > 0:
9146                         myoutput.append("\nConflict: %s block" % \
9147                                 self.blocks)
9148                         if self.blocks > 1:
9149                                 myoutput.append("s")
9150                         if self.blocks_satisfied < self.blocks:
9151                                 myoutput.append(bad(" (%s unsatisfied)") % \
9152                                         (self.blocks - self.blocks_satisfied))
9153                 return "".join(myoutput)
9154
9155 class PollSelectAdapter(PollConstants):
9156
9157         """
9158         Use select to emulate a poll object, for
9159         systems that don't support poll().
9160         """
9161
9162         def __init__(self):
9163                 self._registered = {}
9164                 self._select_args = [[], [], []]
9165
9166         def register(self, fd, *args):
9167                 """
9168                 Only POLLIN is currently supported!
9169                 """
9170                 if len(args) > 1:
9171                         raise TypeError(
9172                                 "register expected at most 2 arguments, got " + \
9173                                 repr(1 + len(args)))
9174
9175                 eventmask = PollConstants.POLLIN | \
9176                         PollConstants.POLLPRI | PollConstants.POLLOUT
9177                 if args:
9178                         eventmask = args[0]
9179
9180                 self._registered[fd] = eventmask
9181                 self._select_args = None
9182
9183         def unregister(self, fd):
9184                 self._select_args = None
9185                 del self._registered[fd]
9186
9187         def poll(self, *args):
9188                 if len(args) > 1:
9189                         raise TypeError(
9190                                 "poll expected at most 2 arguments, got " + \
9191                                 repr(1 + len(args)))
9192
9193                 timeout = None
9194                 if args:
9195                         timeout = args[0]
9196
9197                 select_args = self._select_args
9198                 if select_args is None:
9199                         select_args = [self._registered.keys(), [], []]
9200
9201                 if timeout is not None:
9202                         select_args = select_args[:]
9203                         # Translate poll() timeout args to select() timeout args:
9204                         #
9205                         #          | units        | value(s) for indefinite block
9206                         # ---------|--------------|------------------------------
9207                         #   poll   | milliseconds | omitted, negative, or None
9208                         # ---------|--------------|------------------------------
9209                         #   select | seconds      | omitted
9210                         # ---------|--------------|------------------------------
9211
9212                         if timeout is not None and timeout < 0:
9213                                 timeout = None
9214                         if timeout is not None:
9215                                 select_args.append(timeout / 1000)
9216
9217                 select_events = select.select(*select_args)
9218                 poll_events = []
9219                 for fd in select_events[0]:
9220                         poll_events.append((fd, PollConstants.POLLIN))
9221                 return poll_events
9222
9223 class SequentialTaskQueue(SlotObject):
9224
9225         __slots__ = ("max_jobs", "running_tasks") + \
9226                 ("_dirty", "_scheduling", "_task_queue")
9227
9228         def __init__(self, **kwargs):
9229                 SlotObject.__init__(self, **kwargs)
9230                 self._task_queue = deque()
9231                 self.running_tasks = set()
9232                 if self.max_jobs is None:
9233                         self.max_jobs = 1
9234                 self._dirty = True
9235
9236         def add(self, task):
9237                 self._task_queue.append(task)
9238                 self._dirty = True
9239
9240         def addFront(self, task):
9241                 self._task_queue.appendleft(task)
9242                 self._dirty = True
9243
9244         def schedule(self):
9245
9246                 if not self._dirty:
9247                         return False
9248
9249                 if not self:
9250                         return False
9251
9252                 if self._scheduling:
9253                         # Ignore any recursive schedule() calls triggered via
9254                         # self._task_exit().
9255                         return False
9256
9257                 self._scheduling = True
9258
9259                 task_queue = self._task_queue
9260                 running_tasks = self.running_tasks
9261                 max_jobs = self.max_jobs
9262                 state_changed = False
9263
9264                 while task_queue and \
9265                         (max_jobs is True or len(running_tasks) < max_jobs):
9266                         task = task_queue.popleft()
9267                         cancelled = getattr(task, "cancelled", None)
9268                         if not cancelled:
9269                                 running_tasks.add(task)
9270                                 task.addExitListener(self._task_exit)
9271                                 task.start()
9272                         state_changed = True
9273
9274                 self._dirty = False
9275                 self._scheduling = False
9276
9277                 return state_changed
9278
9279         def _task_exit(self, task):
9280                 """
9281                 Since we can always rely on exit listeners being called, the set of
9282                 running tasks is always pruned automatically and there is never any need
9283                 to actively prune it.
9284                 """
9285                 self.running_tasks.remove(task)
9286                 if self._task_queue:
9287                         self._dirty = True
9288
9289         def clear(self):
9290                 self._task_queue.clear()
9291                 running_tasks = self.running_tasks
9292                 while running_tasks:
9293                         task = running_tasks.pop()
9294                         task.removeExitListener(self._task_exit)
9295                         task.cancel()
9296                 self._dirty = False
9297
9298         def __nonzero__(self):
9299                 return bool(self._task_queue or self.running_tasks)
9300
9301         def __len__(self):
9302                 return len(self._task_queue) + len(self.running_tasks)
9303
9304 _can_poll_device = None
9305
9306 def can_poll_device():
9307         """
9308         Test if it's possible to use poll() on a device such as a pty. This
9309         is known to fail on Darwin.
9310         @rtype: bool
9311         @returns: True if poll() on a device succeeds, False otherwise.
9312         """
9313
9314         global _can_poll_device
9315         if _can_poll_device is not None:
9316                 return _can_poll_device
9317
9318         if not hasattr(select, "poll"):
9319                 _can_poll_device = False
9320                 return _can_poll_device
9321
9322         try:
9323                 dev_null = open('/dev/null', 'rb')
9324         except IOError:
9325                 _can_poll_device = False
9326                 return _can_poll_device
9327
9328         p = select.poll()
9329         p.register(dev_null.fileno(), PollConstants.POLLIN)
9330
9331         invalid_request = False
9332         for f, event in p.poll():
9333                 if event & PollConstants.POLLNVAL:
9334                         invalid_request = True
9335                         break
9336         dev_null.close()
9337
9338         _can_poll_device = not invalid_request
9339         return _can_poll_device
9340
9341 def create_poll_instance():
9342         """
9343         Create an instance of select.poll, or an instance of
9344         PollSelectAdapter there is no poll() implementation or
9345         it is broken somehow.
9346         """
9347         if can_poll_device():
9348                 return select.poll()
9349         return PollSelectAdapter()
9350
9351 getloadavg = getattr(os, "getloadavg", None)
9352 if getloadavg is None:
9353         def getloadavg():
9354                 """
9355                 Uses /proc/loadavg to emulate os.getloadavg().
9356                 Raises OSError if the load average was unobtainable.
9357                 """
9358                 try:
9359                         loadavg_str = open('/proc/loadavg').readline()
9360                 except IOError:
9361                         # getloadavg() is only supposed to raise OSError, so convert
9362                         raise OSError('unknown')
9363                 loadavg_split = loadavg_str.split()
9364                 if len(loadavg_split) < 3:
9365                         raise OSError('unknown')
9366                 loadavg_floats = []
9367                 for i in xrange(3):
9368                         try:
9369                                 loadavg_floats.append(float(loadavg_split[i]))
9370                         except ValueError:
9371                                 raise OSError('unknown')
9372                 return tuple(loadavg_floats)
9373
9374 class PollScheduler(object):
9375
9376         class _sched_iface_class(SlotObject):
9377                 __slots__ = ("register", "schedule", "unregister")
9378
9379         def __init__(self):
9380                 self._max_jobs = 1
9381                 self._max_load = None
9382                 self._jobs = 0
9383                 self._poll_event_queue = []
9384                 self._poll_event_handlers = {}
9385                 self._poll_event_handler_ids = {}
9386                 # Increment id for each new handler.
9387                 self._event_handler_id = 0
9388                 self._poll_obj = create_poll_instance()
9389                 self._scheduling = False
9390
9391         def _schedule(self):
9392                 """
9393                 Calls _schedule_tasks() and automatically returns early from
9394                 any recursive calls to this method that the _schedule_tasks()
9395                 call might trigger. This makes _schedule() safe to call from
9396                 inside exit listeners.
9397                 """
9398                 if self._scheduling:
9399                         return False
9400                 self._scheduling = True
9401                 try:
9402                         return self._schedule_tasks()
9403                 finally:
9404                         self._scheduling = False
9405
9406         def _running_job_count(self):
9407                 return self._jobs
9408
9409         def _can_add_job(self):
9410                 max_jobs = self._max_jobs
9411                 max_load = self._max_load
9412
9413                 if self._max_jobs is not True and \
9414                         self._running_job_count() >= self._max_jobs:
9415                         return False
9416
9417                 if max_load is not None and \
9418                         (max_jobs is True or max_jobs > 1) and \
9419                         self._running_job_count() >= 1:
9420                         try:
9421                                 avg1, avg5, avg15 = getloadavg()
9422                         except OSError:
9423                                 return False
9424
9425                         if avg1 >= max_load:
9426                                 return False
9427
9428                 return True
9429
9430         def _poll(self, timeout=None):
9431                 """
9432                 All poll() calls pass through here. The poll events
9433                 are added directly to self._poll_event_queue.
9434                 In order to avoid endless blocking, this raises
9435                 StopIteration if timeout is None and there are
9436                 no file descriptors to poll.
9437                 """
9438                 if not self._poll_event_handlers:
9439                         self._schedule()
9440                         if timeout is None and \
9441                                 not self._poll_event_handlers:
9442                                 raise StopIteration(
9443                                         "timeout is None and there are no poll() event handlers")
9444
9445                 # The following error is known to occur with Linux kernel versions
9446                 # less than 2.6.24:
9447                 #
9448                 #   select.error: (4, 'Interrupted system call')
9449                 #
9450                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9451                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9452                 # without any events.
9453                 while True:
9454                         try:
9455                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9456                                 break
9457                         except select.error, e:
9458                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9459                                         level=logging.ERROR, noiselevel=-1)
9460                                 del e
9461                                 if timeout is not None:
9462                                         break
9463
9464         def _next_poll_event(self, timeout=None):
9465                 """
9466                 Since the _schedule_wait() loop is called by event
9467                 handlers from _poll_loop(), maintain a central event
9468                 queue for both of them to share events from a single
9469                 poll() call. In order to avoid endless blocking, this
9470                 raises StopIteration if timeout is None and there are
9471                 no file descriptors to poll.
9472                 """
9473                 if not self._poll_event_queue:
9474                         self._poll(timeout)
9475                 return self._poll_event_queue.pop()
9476
9477         def _poll_loop(self):
9478
9479                 event_handlers = self._poll_event_handlers
9480                 event_handled = False
9481
9482                 try:
9483                         while event_handlers:
9484                                 f, event = self._next_poll_event()
9485                                 handler, reg_id = event_handlers[f]
9486                                 handler(f, event)
9487                                 event_handled = True
9488                 except StopIteration:
9489                         event_handled = True
9490
9491                 if not event_handled:
9492                         raise AssertionError("tight loop")
9493
9494         def _schedule_yield(self):
9495                 """
9496                 Schedule for a short period of time chosen by the scheduler based
9497                 on internal state. Synchronous tasks should call this periodically
9498                 in order to allow the scheduler to service pending poll events. The
9499                 scheduler will call poll() exactly once, without blocking, and any
9500                 resulting poll events will be serviced.
9501                 """
9502                 event_handlers = self._poll_event_handlers
9503                 events_handled = 0
9504
9505                 if not event_handlers:
9506                         return bool(events_handled)
9507
9508                 if not self._poll_event_queue:
9509                         self._poll(0)
9510
9511                 try:
9512                         while event_handlers and self._poll_event_queue:
9513                                 f, event = self._next_poll_event()
9514                                 handler, reg_id = event_handlers[f]
9515                                 handler(f, event)
9516                                 events_handled += 1
9517                 except StopIteration:
9518                         events_handled += 1
9519
9520                 return bool(events_handled)
9521
9522         def _register(self, f, eventmask, handler):
9523                 """
9524                 @rtype: Integer
9525                 @return: A unique registration id, for use in schedule() or
9526                         unregister() calls.
9527                 """
9528                 if f in self._poll_event_handlers:
9529                         raise AssertionError("fd %d is already registered" % f)
9530                 self._event_handler_id += 1
9531                 reg_id = self._event_handler_id
9532                 self._poll_event_handler_ids[reg_id] = f
9533                 self._poll_event_handlers[f] = (handler, reg_id)
9534                 self._poll_obj.register(f, eventmask)
9535                 return reg_id
9536
9537         def _unregister(self, reg_id):
9538                 f = self._poll_event_handler_ids[reg_id]
9539                 self._poll_obj.unregister(f)
9540                 del self._poll_event_handlers[f]
9541                 del self._poll_event_handler_ids[reg_id]
9542
9543         def _schedule_wait(self, wait_ids):
9544                 """
9545                 Schedule until wait_id is not longer registered
9546                 for poll() events.
9547                 @type wait_id: int
9548                 @param wait_id: a task id to wait for
9549                 """
9550                 event_handlers = self._poll_event_handlers
9551                 handler_ids = self._poll_event_handler_ids
9552                 event_handled = False
9553
9554                 if isinstance(wait_ids, int):
9555                         wait_ids = frozenset([wait_ids])
9556
9557                 try:
9558                         while wait_ids.intersection(handler_ids):
9559                                 f, event = self._next_poll_event()
9560                                 handler, reg_id = event_handlers[f]
9561                                 handler(f, event)
9562                                 event_handled = True
9563                 except StopIteration:
9564                         event_handled = True
9565
9566                 return event_handled
9567
9568 class QueueScheduler(PollScheduler):
9569
9570         """
9571         Add instances of SequentialTaskQueue and then call run(). The
9572         run() method returns when no tasks remain.
9573         """
9574
9575         def __init__(self, max_jobs=None, max_load=None):
9576                 PollScheduler.__init__(self)
9577
9578                 if max_jobs is None:
9579                         max_jobs = 1
9580
9581                 self._max_jobs = max_jobs
9582                 self._max_load = max_load
9583                 self.sched_iface = self._sched_iface_class(
9584                         register=self._register,
9585                         schedule=self._schedule_wait,
9586                         unregister=self._unregister)
9587
9588                 self._queues = []
9589                 self._schedule_listeners = []
9590
9591         def add(self, q):
9592                 self._queues.append(q)
9593
9594         def remove(self, q):
9595                 self._queues.remove(q)
9596
9597         def run(self):
9598
9599                 while self._schedule():
9600                         self._poll_loop()
9601
9602                 while self._running_job_count():
9603                         self._poll_loop()
9604
9605         def _schedule_tasks(self):
9606                 """
9607                 @rtype: bool
9608                 @returns: True if there may be remaining tasks to schedule,
9609                         False otherwise.
9610                 """
9611                 while self._can_add_job():
9612                         n = self._max_jobs - self._running_job_count()
9613                         if n < 1:
9614                                 break
9615
9616                         if not self._start_next_job(n):
9617                                 return False
9618
9619                 for q in self._queues:
9620                         if q:
9621                                 return True
9622                 return False
9623
9624         def _running_job_count(self):
9625                 job_count = 0
9626                 for q in self._queues:
9627                         job_count += len(q.running_tasks)
9628                 self._jobs = job_count
9629                 return job_count
9630
9631         def _start_next_job(self, n=1):
9632                 started_count = 0
9633                 for q in self._queues:
9634                         initial_job_count = len(q.running_tasks)
9635                         q.schedule()
9636                         final_job_count = len(q.running_tasks)
9637                         if final_job_count > initial_job_count:
9638                                 started_count += (final_job_count - initial_job_count)
9639                         if started_count >= n:
9640                                 break
9641                 return started_count
9642
9643 class TaskScheduler(object):
9644
9645         """
9646         A simple way to handle scheduling of AsynchrousTask instances. Simply
9647         add tasks and call run(). The run() method returns when no tasks remain.
9648         """
9649
9650         def __init__(self, max_jobs=None, max_load=None):
9651                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9652                 self._scheduler = QueueScheduler(
9653                         max_jobs=max_jobs, max_load=max_load)
9654                 self.sched_iface = self._scheduler.sched_iface
9655                 self.run = self._scheduler.run
9656                 self._scheduler.add(self._queue)
9657
9658         def add(self, task):
9659                 self._queue.add(task)
9660
9661 class JobStatusDisplay(object):
9662
9663         _bound_properties = ("curval", "failed", "running")
9664         _jobs_column_width = 48
9665
9666         # Don't update the display unless at least this much
9667         # time has passed, in units of seconds.
9668         _min_display_latency = 2
9669
9670         _default_term_codes = {
9671                 'cr'  : '\r',
9672                 'el'  : '\x1b[K',
9673                 'nel' : '\n',
9674         }
9675
9676         _termcap_name_map = {
9677                 'carriage_return' : 'cr',
9678                 'clr_eol'         : 'el',
9679                 'newline'         : 'nel',
9680         }
9681
9682         def __init__(self, out=sys.stdout, quiet=False):
9683                 object.__setattr__(self, "out", out)
9684                 object.__setattr__(self, "quiet", quiet)
9685                 object.__setattr__(self, "maxval", 0)
9686                 object.__setattr__(self, "merges", 0)
9687                 object.__setattr__(self, "_changed", False)
9688                 object.__setattr__(self, "_displayed", False)
9689                 object.__setattr__(self, "_last_display_time", 0)
9690                 object.__setattr__(self, "width", 80)
9691                 self.reset()
9692
9693                 isatty = hasattr(out, "isatty") and out.isatty()
9694                 object.__setattr__(self, "_isatty", isatty)
9695                 if not isatty or not self._init_term():
9696                         term_codes = {}
9697                         for k, capname in self._termcap_name_map.iteritems():
9698                                 term_codes[k] = self._default_term_codes[capname]
9699                         object.__setattr__(self, "_term_codes", term_codes)
9700                 encoding = sys.getdefaultencoding()
9701                 for k, v in self._term_codes.items():
9702                         if not isinstance(v, str):
9703                                 self._term_codes[k] = v.decode(encoding, 'replace')
9704
9705         def _init_term(self):
9706                 """
9707                 Initialize term control codes.
9708                 @rtype: bool
9709                 @returns: True if term codes were successfully initialized,
9710                         False otherwise.
9711                 """
9712
9713                 term_type = os.environ.get("TERM", "vt100")
9714                 tigetstr = None
9715
9716                 try:
9717                         import curses
9718                         try:
9719                                 curses.setupterm(term_type, self.out.fileno())
9720                                 tigetstr = curses.tigetstr
9721                         except curses.error:
9722                                 pass
9723                 except ImportError:
9724                         pass
9725
9726                 if tigetstr is None:
9727                         return False
9728
9729                 term_codes = {}
9730                 for k, capname in self._termcap_name_map.iteritems():
9731                         code = tigetstr(capname)
9732                         if code is None:
9733                                 code = self._default_term_codes[capname]
9734                         term_codes[k] = code
9735                 object.__setattr__(self, "_term_codes", term_codes)
9736                 return True
9737
9738         def _format_msg(self, msg):
9739                 return ">>> %s" % msg
9740
9741         def _erase(self):
9742                 self.out.write(
9743                         self._term_codes['carriage_return'] + \
9744                         self._term_codes['clr_eol'])
9745                 self.out.flush()
9746                 self._displayed = False
9747
9748         def _display(self, line):
9749                 self.out.write(line)
9750                 self.out.flush()
9751                 self._displayed = True
9752
9753         def _update(self, msg):
9754
9755                 out = self.out
9756                 if not self._isatty:
9757                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9758                         self.out.flush()
9759                         self._displayed = True
9760                         return
9761
9762                 if self._displayed:
9763                         self._erase()
9764
9765                 self._display(self._format_msg(msg))
9766
9767         def displayMessage(self, msg):
9768
9769                 was_displayed = self._displayed
9770
9771                 if self._isatty and self._displayed:
9772                         self._erase()
9773
9774                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9775                 self.out.flush()
9776                 self._displayed = False
9777
9778                 if was_displayed:
9779                         self._changed = True
9780                         self.display()
9781
9782         def reset(self):
9783                 self.maxval = 0
9784                 self.merges = 0
9785                 for name in self._bound_properties:
9786                         object.__setattr__(self, name, 0)
9787
9788                 if self._displayed:
9789                         self.out.write(self._term_codes['newline'])
9790                         self.out.flush()
9791                         self._displayed = False
9792
9793         def __setattr__(self, name, value):
9794                 old_value = getattr(self, name)
9795                 if value == old_value:
9796                         return
9797                 object.__setattr__(self, name, value)
9798                 if name in self._bound_properties:
9799                         self._property_change(name, old_value, value)
9800
9801         def _property_change(self, name, old_value, new_value):
9802                 self._changed = True
9803                 self.display()
9804
9805         def _load_avg_str(self):
9806                 try:
9807                         avg = getloadavg()
9808                 except OSError:
9809                         return 'unknown'
9810
9811                 max_avg = max(avg)
9812
9813                 if max_avg < 10:
9814                         digits = 2
9815                 elif max_avg < 100:
9816                         digits = 1
9817                 else:
9818                         digits = 0
9819
9820                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9821
9822         def display(self):
9823                 """
9824                 Display status on stdout, but only if something has
9825                 changed since the last call.
9826                 """
9827
9828                 if self.quiet:
9829                         return
9830
9831                 current_time = time.time()
9832                 time_delta = current_time - self._last_display_time
9833                 if self._displayed and \
9834                         not self._changed:
9835                         if not self._isatty:
9836                                 return
9837                         if time_delta < self._min_display_latency:
9838                                 return
9839
9840                 self._last_display_time = current_time
9841                 self._changed = False
9842                 self._display_status()
9843
9844         def _display_status(self):
9845                 # Don't use len(self._completed_tasks) here since that also
9846                 # can include uninstall tasks.
9847                 curval_str = str(self.curval)
9848                 maxval_str = str(self.maxval)
9849                 running_str = str(self.running)
9850                 failed_str = str(self.failed)
9851                 load_avg_str = self._load_avg_str()
9852
9853                 color_output = StringIO()
9854                 plain_output = StringIO()
9855                 style_file = portage.output.ConsoleStyleFile(color_output)
9856                 style_file.write_listener = plain_output
9857                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9858                 style_writer.style_listener = style_file.new_styles
9859                 f = formatter.AbstractFormatter(style_writer)
9860
9861                 number_style = "INFORM"
9862                 f.add_literal_data("Jobs: ")
9863                 f.push_style(number_style)
9864                 f.add_literal_data(curval_str)
9865                 f.pop_style()
9866                 f.add_literal_data(" of ")
9867                 f.push_style(number_style)
9868                 f.add_literal_data(maxval_str)
9869                 f.pop_style()
9870                 f.add_literal_data(" complete")
9871
9872                 if self.running:
9873                         f.add_literal_data(", ")
9874                         f.push_style(number_style)
9875                         f.add_literal_data(running_str)
9876                         f.pop_style()
9877                         f.add_literal_data(" running")
9878
9879                 if self.failed:
9880                         f.add_literal_data(", ")
9881                         f.push_style(number_style)
9882                         f.add_literal_data(failed_str)
9883                         f.pop_style()
9884                         f.add_literal_data(" failed")
9885
9886                 padding = self._jobs_column_width - len(plain_output.getvalue())
9887                 if padding > 0:
9888                         f.add_literal_data(padding * " ")
9889
9890                 f.add_literal_data("Load avg: ")
9891                 f.add_literal_data(load_avg_str)
9892
9893                 # Truncate to fit width, to avoid making the terminal scroll if the
9894                 # line overflows (happens when the load average is large).
9895                 plain_output = plain_output.getvalue()
9896                 if self._isatty and len(plain_output) > self.width:
9897                         # Use plain_output here since it's easier to truncate
9898                         # properly than the color output which contains console
9899                         # color codes.
9900                         self._update(plain_output[:self.width])
9901                 else:
9902                         self._update(color_output.getvalue())
9903
9904                 xtermTitle(" ".join(plain_output.split()))
9905
9906 class Scheduler(PollScheduler):
9907
9908         _opts_ignore_blockers = \
9909                 frozenset(["--buildpkgonly",
9910                 "--fetchonly", "--fetch-all-uri",
9911                 "--nodeps", "--pretend"])
9912
9913         _opts_no_background = \
9914                 frozenset(["--pretend",
9915                 "--fetchonly", "--fetch-all-uri"])
9916
9917         _opts_no_restart = frozenset(["--buildpkgonly",
9918                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9919
9920         _bad_resume_opts = set(["--ask", "--changelog",
9921                 "--resume", "--skipfirst"])
9922
9923         _fetch_log = "/var/log/emerge-fetch.log"
9924
9925         class _iface_class(SlotObject):
9926                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9927                         "dblinkElog", "fetch", "register", "schedule",
9928                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9929                         "unregister")
9930
9931         class _fetch_iface_class(SlotObject):
9932                 __slots__ = ("log_file", "schedule")
9933
9934         _task_queues_class = slot_dict_class(
9935                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9936
9937         class _build_opts_class(SlotObject):
9938                 __slots__ = ("buildpkg", "buildpkgonly",
9939                         "fetch_all_uri", "fetchonly", "pretend")
9940
9941         class _binpkg_opts_class(SlotObject):
9942                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9943
9944         class _pkg_count_class(SlotObject):
9945                 __slots__ = ("curval", "maxval")
9946
9947         class _emerge_log_class(SlotObject):
9948                 __slots__ = ("xterm_titles",)
9949
9950                 def log(self, *pargs, **kwargs):
9951                         if not self.xterm_titles:
9952                                 # Avoid interference with the scheduler's status display.
9953                                 kwargs.pop("short_msg", None)
9954                         emergelog(self.xterm_titles, *pargs, **kwargs)
9955
9956         class _failed_pkg(SlotObject):
9957                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9958
9959         class _ConfigPool(object):
9960                 """Interface for a task to temporarily allocate a config
9961                 instance from a pool. This allows a task to be constructed
9962                 long before the config instance actually becomes needed, like
9963                 when prefetchers are constructed for the whole merge list."""
9964                 __slots__ = ("_root", "_allocate", "_deallocate")
9965                 def __init__(self, root, allocate, deallocate):
9966                         self._root = root
9967                         self._allocate = allocate
9968                         self._deallocate = deallocate
9969                 def allocate(self):
9970                         return self._allocate(self._root)
9971                 def deallocate(self, settings):
9972                         self._deallocate(settings)
9973
9974         class _unknown_internal_error(portage.exception.PortageException):
9975                 """
9976                 Used internally to terminate scheduling. The specific reason for
9977                 the failure should have been dumped to stderr.
9978                 """
9979                 def __init__(self, value=""):
9980                         portage.exception.PortageException.__init__(self, value)
9981
9982         def __init__(self, settings, trees, mtimedb, myopts,
9983                 spinner, mergelist, favorites, digraph):
9984                 PollScheduler.__init__(self)
9985                 self.settings = settings
9986                 self.target_root = settings["ROOT"]
9987                 self.trees = trees
9988                 self.myopts = myopts
9989                 self._spinner = spinner
9990                 self._mtimedb = mtimedb
9991                 self._mergelist = mergelist
9992                 self._favorites = favorites
9993                 self._args_set = InternalPackageSet(favorites)
9994                 self._build_opts = self._build_opts_class()
9995                 for k in self._build_opts.__slots__:
9996                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9997                 self._binpkg_opts = self._binpkg_opts_class()
9998                 for k in self._binpkg_opts.__slots__:
9999                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10000
10001                 self.curval = 0
10002                 self._logger = self._emerge_log_class()
10003                 self._task_queues = self._task_queues_class()
10004                 for k in self._task_queues.allowed_keys:
10005                         setattr(self._task_queues, k,
10006                                 SequentialTaskQueue())
10007
10008                 # Holds merges that will wait to be executed when no builds are
10009                 # executing. This is useful for system packages since dependencies
10010                 # on system packages are frequently unspecified.
10011                 self._merge_wait_queue = []
10012                 # Holds merges that have been transfered from the merge_wait_queue to
10013                 # the actual merge queue. They are removed from this list upon
10014                 # completion. Other packages can start building only when this list is
10015                 # empty.
10016                 self._merge_wait_scheduled = []
10017
10018                 # Holds system packages and their deep runtime dependencies. Before
10019                 # being merged, these packages go to merge_wait_queue, to be merged
10020                 # when no other packages are building.
10021                 self._deep_system_deps = set()
10022
10023                 # Holds packages to merge which will satisfy currently unsatisfied
10024                 # deep runtime dependencies of system packages. If this is not empty
10025                 # then no parallel builds will be spawned until it is empty. This
10026                 # minimizes the possibility that a build will fail due to the system
10027                 # being in a fragile state. For example, see bug #259954.
10028                 self._unsatisfied_system_deps = set()
10029
10030                 self._status_display = JobStatusDisplay()
10031                 self._max_load = myopts.get("--load-average")
10032                 max_jobs = myopts.get("--jobs")
10033                 if max_jobs is None:
10034                         max_jobs = 1
10035                 self._set_max_jobs(max_jobs)
10036
10037                 # The root where the currently running
10038                 # portage instance is installed.
10039                 self._running_root = trees["/"]["root_config"]
10040                 self.edebug = 0
10041                 if settings.get("PORTAGE_DEBUG", "") == "1":
10042                         self.edebug = 1
10043                 self.pkgsettings = {}
10044                 self._config_pool = {}
10045                 self._blocker_db = {}
10046                 for root in trees:
10047                         self._config_pool[root] = []
10048                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10049
10050                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10051                         schedule=self._schedule_fetch)
10052                 self._sched_iface = self._iface_class(
10053                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10054                         dblinkDisplayMerge=self._dblink_display_merge,
10055                         dblinkElog=self._dblink_elog,
10056                         fetch=fetch_iface, register=self._register,
10057                         schedule=self._schedule_wait,
10058                         scheduleSetup=self._schedule_setup,
10059                         scheduleUnpack=self._schedule_unpack,
10060                         scheduleYield=self._schedule_yield,
10061                         unregister=self._unregister)
10062
10063                 self._prefetchers = weakref.WeakValueDictionary()
10064                 self._pkg_queue = []
10065                 self._completed_tasks = set()
10066
10067                 self._failed_pkgs = []
10068                 self._failed_pkgs_all = []
10069                 self._failed_pkgs_die_msgs = []
10070                 self._post_mod_echo_msgs = []
10071                 self._parallel_fetch = False
10072                 merge_count = len([x for x in mergelist \
10073                         if isinstance(x, Package) and x.operation == "merge"])
10074                 self._pkg_count = self._pkg_count_class(
10075                         curval=0, maxval=merge_count)
10076                 self._status_display.maxval = self._pkg_count.maxval
10077
10078                 # The load average takes some time to respond when new
10079                 # jobs are added, so we need to limit the rate of adding
10080                 # new jobs.
10081                 self._job_delay_max = 10
10082                 self._job_delay_factor = 1.0
10083                 self._job_delay_exp = 1.5
10084                 self._previous_job_start_time = None
10085
10086                 self._set_digraph(digraph)
10087
10088                 # This is used to memoize the _choose_pkg() result when
10089                 # no packages can be chosen until one of the existing
10090                 # jobs completes.
10091                 self._choose_pkg_return_early = False
10092
10093                 features = self.settings.features
10094                 if "parallel-fetch" in features and \
10095                         not ("--pretend" in self.myopts or \
10096                         "--fetch-all-uri" in self.myopts or \
10097                         "--fetchonly" in self.myopts):
10098                         if "distlocks" not in features:
10099                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10100                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10101                                         "requires the distlocks feature enabled"+"\n",
10102                                         noiselevel=-1)
10103                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10104                                         "thus parallel-fetching is being disabled"+"\n",
10105                                         noiselevel=-1)
10106                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10107                         elif len(mergelist) > 1:
10108                                 self._parallel_fetch = True
10109
10110                 if self._parallel_fetch:
10111                                 # clear out existing fetch log if it exists
10112                                 try:
10113                                         open(self._fetch_log, 'w')
10114                                 except EnvironmentError:
10115                                         pass
10116
10117                 self._running_portage = None
10118                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10119                         portage.const.PORTAGE_PACKAGE_ATOM)
10120                 if portage_match:
10121                         cpv = portage_match.pop()
10122                         self._running_portage = self._pkg(cpv, "installed",
10123                                 self._running_root, installed=True)
10124
10125         def _poll(self, timeout=None):
10126                 self._schedule()
10127                 PollScheduler._poll(self, timeout=timeout)
10128
10129         def _set_max_jobs(self, max_jobs):
10130                 self._max_jobs = max_jobs
10131                 self._task_queues.jobs.max_jobs = max_jobs
10132
10133         def _background_mode(self):
10134                 """
10135                 Check if background mode is enabled and adjust states as necessary.
10136
10137                 @rtype: bool
10138                 @returns: True if background mode is enabled, False otherwise.
10139                 """
10140                 background = (self._max_jobs is True or \
10141                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10142                         not bool(self._opts_no_background.intersection(self.myopts))
10143
10144                 if background:
10145                         interactive_tasks = self._get_interactive_tasks()
10146                         if interactive_tasks:
10147                                 background = False
10148                                 writemsg_level(">>> Sending package output to stdio due " + \
10149                                         "to interactive package(s):\n",
10150                                         level=logging.INFO, noiselevel=-1)
10151                                 msg = [""]
10152                                 for pkg in interactive_tasks:
10153                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10154                                         if pkg.root != "/":
10155                                                 pkg_str += " for " + pkg.root
10156                                         msg.append(pkg_str)
10157                                 msg.append("")
10158                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10159                                         level=logging.INFO, noiselevel=-1)
10160                                 if self._max_jobs is True or self._max_jobs > 1:
10161                                         self._set_max_jobs(1)
10162                                         writemsg_level(">>> Setting --jobs=1 due " + \
10163                                                 "to the above interactive package(s)\n",
10164                                                 level=logging.INFO, noiselevel=-1)
10165
10166                 self._status_display.quiet = \
10167                         not background or \
10168                         ("--quiet" in self.myopts and \
10169                         "--verbose" not in self.myopts)
10170
10171                 self._logger.xterm_titles = \
10172                         "notitles" not in self.settings.features and \
10173                         self._status_display.quiet
10174
10175                 return background
10176
10177         def _get_interactive_tasks(self):
10178                 from portage import flatten
10179                 from portage.dep import use_reduce, paren_reduce
10180                 interactive_tasks = []
10181                 for task in self._mergelist:
10182                         if not (isinstance(task, Package) and \
10183                                 task.operation == "merge"):
10184                                 continue
10185                         try:
10186                                 properties = flatten(use_reduce(paren_reduce(
10187                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10188                         except portage.exception.InvalidDependString, e:
10189                                 show_invalid_depstring_notice(task,
10190                                         task.metadata["PROPERTIES"], str(e))
10191                                 raise self._unknown_internal_error()
10192                         if "interactive" in properties:
10193                                 interactive_tasks.append(task)
10194                 return interactive_tasks
10195
10196         def _set_digraph(self, digraph):
10197                 if "--nodeps" in self.myopts or \
10198                         (self._max_jobs is not True and self._max_jobs < 2):
10199                         # save some memory
10200                         self._digraph = None
10201                         return
10202
10203                 self._digraph = digraph
10204                 self._find_system_deps()
10205                 self._prune_digraph()
10206                 self._prevent_builddir_collisions()
10207
10208         def _find_system_deps(self):
10209                 """
10210                 Find system packages and their deep runtime dependencies. Before being
10211                 merged, these packages go to merge_wait_queue, to be merged when no
10212                 other packages are building.
10213                 """
10214                 deep_system_deps = self._deep_system_deps
10215                 deep_system_deps.clear()
10216                 deep_system_deps.update(
10217                         _find_deep_system_runtime_deps(self._digraph))
10218                 deep_system_deps.difference_update([pkg for pkg in \
10219                         deep_system_deps if pkg.operation != "merge"])
10220
10221         def _prune_digraph(self):
10222                 """
10223                 Prune any root nodes that are irrelevant.
10224                 """
10225
10226                 graph = self._digraph
10227                 completed_tasks = self._completed_tasks
10228                 removed_nodes = set()
10229                 while True:
10230                         for node in graph.root_nodes():
10231                                 if not isinstance(node, Package) or \
10232                                         (node.installed and node.operation == "nomerge") or \
10233                                         node.onlydeps or \
10234                                         node in completed_tasks:
10235                                         removed_nodes.add(node)
10236                         if removed_nodes:
10237                                 graph.difference_update(removed_nodes)
10238                         if not removed_nodes:
10239                                 break
10240                         removed_nodes.clear()
10241
10242         def _prevent_builddir_collisions(self):
10243                 """
10244                 When building stages, sometimes the same exact cpv needs to be merged
10245                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10246                 in the builddir. Currently, normal file locks would be inappropriate
10247                 for this purpose since emerge holds all of it's build dir locks from
10248                 the main process.
10249                 """
10250                 cpv_map = {}
10251                 for pkg in self._mergelist:
10252                         if not isinstance(pkg, Package):
10253                                 # a satisfied blocker
10254                                 continue
10255                         if pkg.installed:
10256                                 continue
10257                         if pkg.cpv not in cpv_map:
10258                                 cpv_map[pkg.cpv] = [pkg]
10259                                 continue
10260                         for earlier_pkg in cpv_map[pkg.cpv]:
10261                                 self._digraph.add(earlier_pkg, pkg,
10262                                         priority=DepPriority(buildtime=True))
10263                         cpv_map[pkg.cpv].append(pkg)
10264
10265         class _pkg_failure(portage.exception.PortageException):
10266                 """
10267                 An instance of this class is raised by unmerge() when
10268                 an uninstallation fails.
10269                 """
10270                 status = 1
10271                 def __init__(self, *pargs):
10272                         portage.exception.PortageException.__init__(self, pargs)
10273                         if pargs:
10274                                 self.status = pargs[0]
10275
10276         def _schedule_fetch(self, fetcher):
10277                 """
10278                 Schedule a fetcher on the fetch queue, in order to
10279                 serialize access to the fetch log.
10280                 """
10281                 self._task_queues.fetch.addFront(fetcher)
10282
10283         def _schedule_setup(self, setup_phase):
10284                 """
10285                 Schedule a setup phase on the merge queue, in order to
10286                 serialize unsandboxed access to the live filesystem.
10287                 """
10288                 self._task_queues.merge.addFront(setup_phase)
10289                 self._schedule()
10290
10291         def _schedule_unpack(self, unpack_phase):
10292                 """
10293                 Schedule an unpack phase on the unpack queue, in order
10294                 to serialize $DISTDIR access for live ebuilds.
10295                 """
10296                 self._task_queues.unpack.add(unpack_phase)
10297
10298         def _find_blockers(self, new_pkg):
10299                 """
10300                 Returns a callable which should be called only when
10301                 the vdb lock has been acquired.
10302                 """
10303                 def get_blockers():
10304                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10305                 return get_blockers
10306
10307         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10308                 if self._opts_ignore_blockers.intersection(self.myopts):
10309                         return None
10310
10311                 # Call gc.collect() here to avoid heap overflow that
10312                 # triggers 'Cannot allocate memory' errors (reported
10313                 # with python-2.5).
10314                 import gc
10315                 gc.collect()
10316
10317                 blocker_db = self._blocker_db[new_pkg.root]
10318
10319                 blocker_dblinks = []
10320                 for blocking_pkg in blocker_db.findInstalledBlockers(
10321                         new_pkg, acquire_lock=acquire_lock):
10322                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10323                                 continue
10324                         if new_pkg.cpv == blocking_pkg.cpv:
10325                                 continue
10326                         blocker_dblinks.append(portage.dblink(
10327                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10328                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10329                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10330
10331                 gc.collect()
10332
10333                 return blocker_dblinks
10334
10335         def _dblink_pkg(self, pkg_dblink):
10336                 cpv = pkg_dblink.mycpv
10337                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10338                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10339                 installed = type_name == "installed"
10340                 return self._pkg(cpv, type_name, root_config, installed=installed)
10341
10342         def _append_to_log_path(self, log_path, msg):
10343                 f = open(log_path, 'a')
10344                 try:
10345                         f.write(msg)
10346                 finally:
10347                         f.close()
10348
10349         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10350
10351                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10352                 log_file = None
10353                 out = sys.stdout
10354                 background = self._background
10355
10356                 if background and log_path is not None:
10357                         log_file = open(log_path, 'a')
10358                         out = log_file
10359
10360                 try:
10361                         for msg in msgs:
10362                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10363                 finally:
10364                         if log_file is not None:
10365                                 log_file.close()
10366
10367         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10368                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10369                 background = self._background
10370
10371                 if log_path is None:
10372                         if not (background and level < logging.WARN):
10373                                 portage.util.writemsg_level(msg,
10374                                         level=level, noiselevel=noiselevel)
10375                 else:
10376                         if not background:
10377                                 portage.util.writemsg_level(msg,
10378                                         level=level, noiselevel=noiselevel)
10379                         self._append_to_log_path(log_path, msg)
10380
10381         def _dblink_ebuild_phase(self,
10382                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10383                 """
10384                 Using this callback for merge phases allows the scheduler
10385                 to run while these phases execute asynchronously, and allows
10386                 the scheduler control output handling.
10387                 """
10388
10389                 scheduler = self._sched_iface
10390                 settings = pkg_dblink.settings
10391                 pkg = self._dblink_pkg(pkg_dblink)
10392                 background = self._background
10393                 log_path = settings.get("PORTAGE_LOG_FILE")
10394
10395                 ebuild_phase = EbuildPhase(background=background,
10396                         pkg=pkg, phase=phase, scheduler=scheduler,
10397                         settings=settings, tree=pkg_dblink.treetype)
10398                 ebuild_phase.start()
10399                 ebuild_phase.wait()
10400
10401                 return ebuild_phase.returncode
10402
10403         def _check_manifests(self):
10404                 # Verify all the manifests now so that the user is notified of failure
10405                 # as soon as possible.
10406                 if "strict" not in self.settings.features or \
10407                         "--fetchonly" in self.myopts or \
10408                         "--fetch-all-uri" in self.myopts:
10409                         return os.EX_OK
10410
10411                 shown_verifying_msg = False
10412                 quiet_settings = {}
10413                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10414                         quiet_config = portage.config(clone=pkgsettings)
10415                         quiet_config["PORTAGE_QUIET"] = "1"
10416                         quiet_config.backup_changes("PORTAGE_QUIET")
10417                         quiet_settings[myroot] = quiet_config
10418                         del quiet_config
10419
10420                 for x in self._mergelist:
10421                         if not isinstance(x, Package) or \
10422                                 x.type_name != "ebuild":
10423                                 continue
10424
10425                         if not shown_verifying_msg:
10426                                 shown_verifying_msg = True
10427                                 self._status_msg("Verifying ebuild manifests")
10428
10429                         root_config = x.root_config
10430                         portdb = root_config.trees["porttree"].dbapi
10431                         quiet_config = quiet_settings[root_config.root]
10432                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10433                         if not portage.digestcheck([], quiet_config, strict=True):
10434                                 return 1
10435
10436                 return os.EX_OK
10437
10438         def _add_prefetchers(self):
10439
10440                 if not self._parallel_fetch:
10441                         return
10442
10443                 if self._parallel_fetch:
10444                         self._status_msg("Starting parallel fetch")
10445
10446                         prefetchers = self._prefetchers
10447                         getbinpkg = "--getbinpkg" in self.myopts
10448
10449                         # In order to avoid "waiting for lock" messages
10450                         # at the beginning, which annoy users, never
10451                         # spawn a prefetcher for the first package.
10452                         for pkg in self._mergelist[1:]:
10453                                 prefetcher = self._create_prefetcher(pkg)
10454                                 if prefetcher is not None:
10455                                         self._task_queues.fetch.add(prefetcher)
10456                                         prefetchers[pkg] = prefetcher
10457
10458         def _create_prefetcher(self, pkg):
10459                 """
10460                 @return: a prefetcher, or None if not applicable
10461                 """
10462                 prefetcher = None
10463
10464                 if not isinstance(pkg, Package):
10465                         pass
10466
10467                 elif pkg.type_name == "ebuild":
10468
10469                         prefetcher = EbuildFetcher(background=True,
10470                                 config_pool=self._ConfigPool(pkg.root,
10471                                 self._allocate_config, self._deallocate_config),
10472                                 fetchonly=1, logfile=self._fetch_log,
10473                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10474
10475                 elif pkg.type_name == "binary" and \
10476                         "--getbinpkg" in self.myopts and \
10477                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10478
10479                         prefetcher = BinpkgPrefetcher(background=True,
10480                                 pkg=pkg, scheduler=self._sched_iface)
10481
10482                 return prefetcher
10483
10484         def _is_restart_scheduled(self):
10485                 """
10486                 Check if the merge list contains a replacement
10487                 for the current running instance, that will result
10488                 in restart after merge.
10489                 @rtype: bool
10490                 @returns: True if a restart is scheduled, False otherwise.
10491                 """
10492                 if self._opts_no_restart.intersection(self.myopts):
10493                         return False
10494
10495                 mergelist = self._mergelist
10496
10497                 for i, pkg in enumerate(mergelist):
10498                         if self._is_restart_necessary(pkg) and \
10499                                 i != len(mergelist) - 1:
10500                                 return True
10501
10502                 return False
10503
10504         def _is_restart_necessary(self, pkg):
10505                 """
10506                 @return: True if merging the given package
10507                         requires restart, False otherwise.
10508                 """
10509
10510                 # Figure out if we need a restart.
10511                 if pkg.root == self._running_root.root and \
10512                         portage.match_from_list(
10513                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10514                         if self._running_portage:
10515                                 return pkg.cpv != self._running_portage.cpv
10516                         return True
10517                 return False
10518
10519         def _restart_if_necessary(self, pkg):
10520                 """
10521                 Use execv() to restart emerge. This happens
10522                 if portage upgrades itself and there are
10523                 remaining packages in the list.
10524                 """
10525
10526                 if self._opts_no_restart.intersection(self.myopts):
10527                         return
10528
10529                 if not self._is_restart_necessary(pkg):
10530                         return
10531
10532                 if pkg == self._mergelist[-1]:
10533                         return
10534
10535                 self._main_loop_cleanup()
10536
10537                 logger = self._logger
10538                 pkg_count = self._pkg_count
10539                 mtimedb = self._mtimedb
10540                 bad_resume_opts = self._bad_resume_opts
10541
10542                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10543                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10544
10545                 logger.log(" *** RESTARTING " + \
10546                         "emerge via exec() after change of " + \
10547                         "portage version.")
10548
10549                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10550                 mtimedb.commit()
10551                 portage.run_exitfuncs()
10552                 mynewargv = [sys.argv[0], "--resume"]
10553                 resume_opts = self.myopts.copy()
10554                 # For automatic resume, we need to prevent
10555                 # any of bad_resume_opts from leaking in
10556                 # via EMERGE_DEFAULT_OPTS.
10557                 resume_opts["--ignore-default-opts"] = True
10558                 for myopt, myarg in resume_opts.iteritems():
10559                         if myopt not in bad_resume_opts:
10560                                 if myarg is True:
10561                                         mynewargv.append(myopt)
10562                                 else:
10563                                         mynewargv.append(myopt +"="+ str(myarg))
10564                 # priority only needs to be adjusted on the first run
10565                 os.environ["PORTAGE_NICENESS"] = "0"
10566                 os.execv(mynewargv[0], mynewargv)
10567
10568         def merge(self):
10569
10570                 if "--resume" in self.myopts:
10571                         # We're resuming.
10572                         portage.writemsg_stdout(
10573                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10574                         self._logger.log(" *** Resuming merge...")
10575
10576                 self._save_resume_list()
10577
10578                 try:
10579                         self._background = self._background_mode()
10580                 except self._unknown_internal_error:
10581                         return 1
10582
10583                 for root in self.trees:
10584                         root_config = self.trees[root]["root_config"]
10585
10586                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10587                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10588                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10589                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10590                         if not tmpdir or not os.path.isdir(tmpdir):
10591                                 msg = "The directory specified in your " + \
10592                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10593                                 "does not exist. Please create this " + \
10594                                 "directory or correct your PORTAGE_TMPDIR setting."
10595                                 msg = textwrap.wrap(msg, 70)
10596                                 out = portage.output.EOutput()
10597                                 for l in msg:
10598                                         out.eerror(l)
10599                                 return 1
10600
10601                         if self._background:
10602                                 root_config.settings.unlock()
10603                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10604                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10605                                 root_config.settings.lock()
10606
10607                         self.pkgsettings[root] = portage.config(
10608                                 clone=root_config.settings)
10609
10610                 rval = self._check_manifests()
10611                 if rval != os.EX_OK:
10612                         return rval
10613
10614                 keep_going = "--keep-going" in self.myopts
10615                 fetchonly = self._build_opts.fetchonly
10616                 mtimedb = self._mtimedb
10617                 failed_pkgs = self._failed_pkgs
10618
10619                 while True:
10620                         rval = self._merge()
10621                         if rval == os.EX_OK or fetchonly or not keep_going:
10622                                 break
10623                         if "resume" not in mtimedb:
10624                                 break
10625                         mergelist = self._mtimedb["resume"].get("mergelist")
10626                         if not mergelist:
10627                                 break
10628
10629                         if not failed_pkgs:
10630                                 break
10631
10632                         for failed_pkg in failed_pkgs:
10633                                 mergelist.remove(list(failed_pkg.pkg))
10634
10635                         self._failed_pkgs_all.extend(failed_pkgs)
10636                         del failed_pkgs[:]
10637
10638                         if not mergelist:
10639                                 break
10640
10641                         if not self._calc_resume_list():
10642                                 break
10643
10644                         clear_caches(self.trees)
10645                         if not self._mergelist:
10646                                 break
10647
10648                         self._save_resume_list()
10649                         self._pkg_count.curval = 0
10650                         self._pkg_count.maxval = len([x for x in self._mergelist \
10651                                 if isinstance(x, Package) and x.operation == "merge"])
10652                         self._status_display.maxval = self._pkg_count.maxval
10653
10654                 self._logger.log(" *** Finished. Cleaning up...")
10655
10656                 if failed_pkgs:
10657                         self._failed_pkgs_all.extend(failed_pkgs)
10658                         del failed_pkgs[:]
10659
10660                 background = self._background
10661                 failure_log_shown = False
10662                 if background and len(self._failed_pkgs_all) == 1:
10663                         # If only one package failed then just show it's
10664                         # whole log for easy viewing.
10665                         failed_pkg = self._failed_pkgs_all[-1]
10666                         build_dir = failed_pkg.build_dir
10667                         log_file = None
10668
10669                         log_paths = [failed_pkg.build_log]
10670
10671                         log_path = self._locate_failure_log(failed_pkg)
10672                         if log_path is not None:
10673                                 try:
10674                                         log_file = open(log_path)
10675                                 except IOError:
10676                                         pass
10677
10678                         if log_file is not None:
10679                                 try:
10680                                         for line in log_file:
10681                                                 writemsg_level(line, noiselevel=-1)
10682                                 finally:
10683                                         log_file.close()
10684                                 failure_log_shown = True
10685
10686                 # Dump mod_echo output now since it tends to flood the terminal.
10687                 # This allows us to avoid having more important output, generated
10688                 # later, from being swept away by the mod_echo output.
10689                 mod_echo_output =  _flush_elog_mod_echo()
10690
10691                 if background and not failure_log_shown and \
10692                         self._failed_pkgs_all and \
10693                         self._failed_pkgs_die_msgs and \
10694                         not mod_echo_output:
10695
10696                         printer = portage.output.EOutput()
10697                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10698                                 root_msg = ""
10699                                 if mysettings["ROOT"] != "/":
10700                                         root_msg = " merged to %s" % mysettings["ROOT"]
10701                                 print
10702                                 printer.einfo("Error messages for package %s%s:" % \
10703                                         (colorize("INFORM", key), root_msg))
10704                                 print
10705                                 for phase in portage.const.EBUILD_PHASES:
10706                                         if phase not in logentries:
10707                                                 continue
10708                                         for msgtype, msgcontent in logentries[phase]:
10709                                                 if isinstance(msgcontent, basestring):
10710                                                         msgcontent = [msgcontent]
10711                                                 for line in msgcontent:
10712                                                         printer.eerror(line.strip("\n"))
10713
10714                 if self._post_mod_echo_msgs:
10715                         for msg in self._post_mod_echo_msgs:
10716                                 msg()
10717
10718                 if len(self._failed_pkgs_all) > 1 or \
10719                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10720                         if len(self._failed_pkgs_all) > 1:
10721                                 msg = "The following %d packages have " % \
10722                                         len(self._failed_pkgs_all) + \
10723                                         "failed to build or install:"
10724                         else:
10725                                 msg = "The following package has " + \
10726                                         "failed to build or install:"
10727                         prefix = bad(" * ")
10728                         writemsg(prefix + "\n", noiselevel=-1)
10729                         from textwrap import wrap
10730                         for line in wrap(msg, 72):
10731                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10732                         writemsg(prefix + "\n", noiselevel=-1)
10733                         for failed_pkg in self._failed_pkgs_all:
10734                                 writemsg("%s\t%s\n" % (prefix,
10735                                         colorize("INFORM", str(failed_pkg.pkg))),
10736                                         noiselevel=-1)
10737                         writemsg(prefix + "\n", noiselevel=-1)
10738
10739                 return rval
10740
10741         def _elog_listener(self, mysettings, key, logentries, fulltext):
10742                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10743                 if errors:
10744                         self._failed_pkgs_die_msgs.append(
10745                                 (mysettings, key, errors))
10746
10747         def _locate_failure_log(self, failed_pkg):
10748
10749                 build_dir = failed_pkg.build_dir
10750                 log_file = None
10751
10752                 log_paths = [failed_pkg.build_log]
10753
10754                 for log_path in log_paths:
10755                         if not log_path:
10756                                 continue
10757
10758                         try:
10759                                 log_size = os.stat(log_path).st_size
10760                         except OSError:
10761                                 continue
10762
10763                         if log_size == 0:
10764                                 continue
10765
10766                         return log_path
10767
10768                 return None
10769
10770         def _add_packages(self):
10771                 pkg_queue = self._pkg_queue
10772                 for pkg in self._mergelist:
10773                         if isinstance(pkg, Package):
10774                                 pkg_queue.append(pkg)
10775                         elif isinstance(pkg, Blocker):
10776                                 pass
10777
10778         def _system_merge_started(self, merge):
10779                 """
10780                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10781                 """
10782                 graph = self._digraph
10783                 if graph is None:
10784                         return
10785                 pkg = merge.merge.pkg
10786                 completed_tasks = self._completed_tasks
10787                 unsatisfied = self._unsatisfied_system_deps
10788
10789                 def ignore_non_runtime(priority):
10790                         """
10791                         Ignore non-runtime priorities
10792                         """
10793                         if isinstance(priority, DepPriority) and \
10794                                 (priority.runtime or priority.runtime_post):
10795                                 return False
10796                         return True
10797
10798                 def ignore_satisfied_runtime(priority):
10799                         """
10800                         Ignore non-runtime and satisfied runtime priorities.
10801                         """
10802                         if isinstance(priority, DepPriority) and \
10803                                 not priority.satisfied and \
10804                                 (priority.runtime or priority.runtime_post):
10805                                 return False
10806                         return True
10807
10808                 traversed = set()
10809                 dep_stack = [pkg]
10810                 while dep_stack:
10811                         node = dep_stack.pop()
10812                         if node in traversed:
10813                                 continue
10814                         traversed.add(node)
10815
10816                         unsatisfied_runtime = set(graph.child_nodes(node,
10817                                 ignore_priority=ignore_satisfied_runtime))
10818                         for child in graph.child_nodes(node,
10819                                 ignore_priority=ignore_non_runtime):
10820                                 if not isinstance(child, Package) or \
10821                                         child.operation == 'uninstall':
10822                                         continue
10823                                 if child is pkg:
10824                                         continue
10825                                 if child.operation == 'merge' and \
10826                                         child in completed_tasks:
10827                                         # When traversing children, only traverse completed
10828                                         # 'merge' nodes since those are the only ones that need
10829                                         # to be checked for unsatisfied runtime deps, and it's
10830                                         # normal for nodes that aren't yet complete to have
10831                                         # unsatisfied runtime deps.
10832                                         dep_stack.append(child)
10833                                 if child.operation == 'merge' and \
10834                                         child not in completed_tasks and \
10835                                         child in unsatisfied_runtime:
10836                                         unsatisfied.add(child)
10837
10838         def _merge_wait_exit_handler(self, task):
10839                 self._merge_wait_scheduled.remove(task)
10840                 self._merge_exit(task)
10841
10842         def _merge_exit(self, merge):
10843                 self._do_merge_exit(merge)
10844                 self._deallocate_config(merge.merge.settings)
10845                 if merge.returncode == os.EX_OK and \
10846                         not merge.merge.pkg.installed:
10847                         self._status_display.curval += 1
10848                 self._status_display.merges = len(self._task_queues.merge)
10849                 self._schedule()
10850
10851         def _do_merge_exit(self, merge):
10852                 pkg = merge.merge.pkg
10853                 if merge.returncode != os.EX_OK:
10854                         settings = merge.merge.settings
10855                         build_dir = settings.get("PORTAGE_BUILDDIR")
10856                         build_log = settings.get("PORTAGE_LOG_FILE")
10857
10858                         self._failed_pkgs.append(self._failed_pkg(
10859                                 build_dir=build_dir, build_log=build_log,
10860                                 pkg=pkg,
10861                                 returncode=merge.returncode))
10862                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10863
10864                         self._status_display.failed = len(self._failed_pkgs)
10865                         return
10866
10867                 self._task_complete(pkg)
10868                 pkg_to_replace = merge.merge.pkg_to_replace
10869                 if pkg_to_replace is not None:
10870                         # When a package is replaced, mark it's uninstall
10871                         # task complete (if any).
10872                         uninst_hash_key = \
10873                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10874                         self._task_complete(uninst_hash_key)
10875
10876                 if pkg.installed:
10877                         return
10878
10879                 self._restart_if_necessary(pkg)
10880
10881                 # Call mtimedb.commit() after each merge so that
10882                 # --resume still works after being interrupted
10883                 # by reboot, sigkill or similar.
10884                 mtimedb = self._mtimedb
10885                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10886                 if not mtimedb["resume"]["mergelist"]:
10887                         del mtimedb["resume"]
10888                 mtimedb.commit()
10889
10890         def _build_exit(self, build):
10891                 if build.returncode == os.EX_OK:
10892                         self.curval += 1
10893                         merge = PackageMerge(merge=build)
10894                         if not build.build_opts.buildpkgonly and \
10895                                 build.pkg in self._deep_system_deps:
10896                                 # Since dependencies on system packages are frequently
10897                                 # unspecified, merge them only when no builds are executing.
10898                                 self._merge_wait_queue.append(merge)
10899                                 merge.addStartListener(self._system_merge_started)
10900                         else:
10901                                 merge.addExitListener(self._merge_exit)
10902                                 self._task_queues.merge.add(merge)
10903                                 self._status_display.merges = len(self._task_queues.merge)
10904                 else:
10905                         settings = build.settings
10906                         build_dir = settings.get("PORTAGE_BUILDDIR")
10907                         build_log = settings.get("PORTAGE_LOG_FILE")
10908
10909                         self._failed_pkgs.append(self._failed_pkg(
10910                                 build_dir=build_dir, build_log=build_log,
10911                                 pkg=build.pkg,
10912                                 returncode=build.returncode))
10913                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10914
10915                         self._status_display.failed = len(self._failed_pkgs)
10916                         self._deallocate_config(build.settings)
10917                 self._jobs -= 1
10918                 self._status_display.running = self._jobs
10919                 self._schedule()
10920
10921         def _extract_exit(self, build):
10922                 self._build_exit(build)
10923
10924         def _task_complete(self, pkg):
10925                 self._completed_tasks.add(pkg)
10926                 self._unsatisfied_system_deps.discard(pkg)
10927                 self._choose_pkg_return_early = False
10928
10929         def _merge(self):
10930
10931                 self._add_prefetchers()
10932                 self._add_packages()
10933                 pkg_queue = self._pkg_queue
10934                 failed_pkgs = self._failed_pkgs
10935                 portage.locks._quiet = self._background
10936                 portage.elog._emerge_elog_listener = self._elog_listener
10937                 rval = os.EX_OK
10938
10939                 try:
10940                         self._main_loop()
10941                 finally:
10942                         self._main_loop_cleanup()
10943                         portage.locks._quiet = False
10944                         portage.elog._emerge_elog_listener = None
10945                         if failed_pkgs:
10946                                 rval = failed_pkgs[-1].returncode
10947
10948                 return rval
10949
10950         def _main_loop_cleanup(self):
10951                 del self._pkg_queue[:]
10952                 self._completed_tasks.clear()
10953                 self._deep_system_deps.clear()
10954                 self._unsatisfied_system_deps.clear()
10955                 self._choose_pkg_return_early = False
10956                 self._status_display.reset()
10957                 self._digraph = None
10958                 self._task_queues.fetch.clear()
10959
10960         def _choose_pkg(self):
10961                 """
10962                 Choose a task that has all it's dependencies satisfied.
10963                 """
10964
10965                 if self._choose_pkg_return_early:
10966                         return None
10967
10968                 if self._digraph is None:
10969                         if (self._jobs or self._task_queues.merge) and \
10970                                 not ("--nodeps" in self.myopts and \
10971                                 (self._max_jobs is True or self._max_jobs > 1)):
10972                                 self._choose_pkg_return_early = True
10973                                 return None
10974                         return self._pkg_queue.pop(0)
10975
10976                 if not (self._jobs or self._task_queues.merge):
10977                         return self._pkg_queue.pop(0)
10978
10979                 self._prune_digraph()
10980
10981                 chosen_pkg = None
10982                 later = set(self._pkg_queue)
10983                 for pkg in self._pkg_queue:
10984                         later.remove(pkg)
10985                         if not self._dependent_on_scheduled_merges(pkg, later):
10986                                 chosen_pkg = pkg
10987                                 break
10988
10989                 if chosen_pkg is not None:
10990                         self._pkg_queue.remove(chosen_pkg)
10991
10992                 if chosen_pkg is None:
10993                         # There's no point in searching for a package to
10994                         # choose until at least one of the existing jobs
10995                         # completes.
10996                         self._choose_pkg_return_early = True
10997
10998                 return chosen_pkg
10999
11000         def _dependent_on_scheduled_merges(self, pkg, later):
11001                 """
11002                 Traverse the subgraph of the given packages deep dependencies
11003                 to see if it contains any scheduled merges.
11004                 @param pkg: a package to check dependencies for
11005                 @type pkg: Package
11006                 @param later: packages for which dependence should be ignored
11007                         since they will be merged later than pkg anyway and therefore
11008                         delaying the merge of pkg will not result in a more optimal
11009                         merge order
11010                 @type later: set
11011                 @rtype: bool
11012                 @returns: True if the package is dependent, False otherwise.
11013                 """
11014
11015                 graph = self._digraph
11016                 completed_tasks = self._completed_tasks
11017
11018                 dependent = False
11019                 traversed_nodes = set([pkg])
11020                 direct_deps = graph.child_nodes(pkg)
11021                 node_stack = direct_deps
11022                 direct_deps = frozenset(direct_deps)
11023                 while node_stack:
11024                         node = node_stack.pop()
11025                         if node in traversed_nodes:
11026                                 continue
11027                         traversed_nodes.add(node)
11028                         if not ((node.installed and node.operation == "nomerge") or \
11029                                 (node.operation == "uninstall" and \
11030                                 node not in direct_deps) or \
11031                                 node in completed_tasks or \
11032                                 node in later):
11033                                 dependent = True
11034                                 break
11035                         node_stack.extend(graph.child_nodes(node))
11036
11037                 return dependent
11038
11039         def _allocate_config(self, root):
11040                 """
11041                 Allocate a unique config instance for a task in order
11042                 to prevent interference between parallel tasks.
11043                 """
11044                 if self._config_pool[root]:
11045                         temp_settings = self._config_pool[root].pop()
11046                 else:
11047                         temp_settings = portage.config(clone=self.pkgsettings[root])
11048                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11049                 # performance reasons, call it here to make sure all settings from the
11050                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11051                 temp_settings.reload()
11052                 temp_settings.reset()
11053                 return temp_settings
11054
11055         def _deallocate_config(self, settings):
11056                 self._config_pool[settings["ROOT"]].append(settings)
11057
11058         def _main_loop(self):
11059
11060                 # Only allow 1 job max if a restart is scheduled
11061                 # due to portage update.
11062                 if self._is_restart_scheduled() or \
11063                         self._opts_no_background.intersection(self.myopts):
11064                         self._set_max_jobs(1)
11065
11066                 merge_queue = self._task_queues.merge
11067
11068                 while self._schedule():
11069                         if self._poll_event_handlers:
11070                                 self._poll_loop()
11071
11072                 while True:
11073                         self._schedule()
11074                         if not (self._jobs or merge_queue):
11075                                 break
11076                         if self._poll_event_handlers:
11077                                 self._poll_loop()
11078
11079         def _keep_scheduling(self):
11080                 return bool(self._pkg_queue and \
11081                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11082
11083         def _schedule_tasks(self):
11084
11085                 # When the number of jobs drops to zero, process all waiting merges.
11086                 if not self._jobs and self._merge_wait_queue:
11087                         for task in self._merge_wait_queue:
11088                                 task.addExitListener(self._merge_wait_exit_handler)
11089                                 self._task_queues.merge.add(task)
11090                         self._status_display.merges = len(self._task_queues.merge)
11091                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11092                         del self._merge_wait_queue[:]
11093
11094                 self._schedule_tasks_imp()
11095                 self._status_display.display()
11096
11097                 state_change = 0
11098                 for q in self._task_queues.values():
11099                         if q.schedule():
11100                                 state_change += 1
11101
11102                 # Cancel prefetchers if they're the only reason
11103                 # the main poll loop is still running.
11104                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11105                         not (self._jobs or self._task_queues.merge) and \
11106                         self._task_queues.fetch:
11107                         self._task_queues.fetch.clear()
11108                         state_change += 1
11109
11110                 if state_change:
11111                         self._schedule_tasks_imp()
11112                         self._status_display.display()
11113
11114                 return self._keep_scheduling()
11115
11116         def _job_delay(self):
11117                 """
11118                 @rtype: bool
11119                 @returns: True if job scheduling should be delayed, False otherwise.
11120                 """
11121
11122                 if self._jobs and self._max_load is not None:
11123
11124                         current_time = time.time()
11125
11126                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11127                         if delay > self._job_delay_max:
11128                                 delay = self._job_delay_max
11129                         if (current_time - self._previous_job_start_time) < delay:
11130                                 return True
11131
11132                 return False
11133
11134         def _schedule_tasks_imp(self):
11135                 """
11136                 @rtype: bool
11137                 @returns: True if state changed, False otherwise.
11138                 """
11139
11140                 state_change = 0
11141
11142                 while True:
11143
11144                         if not self._keep_scheduling():
11145                                 return bool(state_change)
11146
11147                         if self._choose_pkg_return_early or \
11148                                 self._merge_wait_scheduled or \
11149                                 (self._jobs and self._unsatisfied_system_deps) or \
11150                                 not self._can_add_job() or \
11151                                 self._job_delay():
11152                                 return bool(state_change)
11153
11154                         pkg = self._choose_pkg()
11155                         if pkg is None:
11156                                 return bool(state_change)
11157
11158                         state_change += 1
11159
11160                         if not pkg.installed:
11161                                 self._pkg_count.curval += 1
11162
11163                         task = self._task(pkg)
11164
11165                         if pkg.installed:
11166                                 merge = PackageMerge(merge=task)
11167                                 merge.addExitListener(self._merge_exit)
11168                                 self._task_queues.merge.add(merge)
11169
11170                         elif pkg.built:
11171                                 self._jobs += 1
11172                                 self._previous_job_start_time = time.time()
11173                                 self._status_display.running = self._jobs
11174                                 task.addExitListener(self._extract_exit)
11175                                 self._task_queues.jobs.add(task)
11176
11177                         else:
11178                                 self._jobs += 1
11179                                 self._previous_job_start_time = time.time()
11180                                 self._status_display.running = self._jobs
11181                                 task.addExitListener(self._build_exit)
11182                                 self._task_queues.jobs.add(task)
11183
11184                 return bool(state_change)
11185
11186         def _task(self, pkg):
11187
11188                 pkg_to_replace = None
11189                 if pkg.operation != "uninstall":
11190                         vardb = pkg.root_config.trees["vartree"].dbapi
11191                         previous_cpv = vardb.match(pkg.slot_atom)
11192                         if previous_cpv:
11193                                 previous_cpv = previous_cpv.pop()
11194                                 pkg_to_replace = self._pkg(previous_cpv,
11195                                         "installed", pkg.root_config, installed=True)
11196
11197                 task = MergeListItem(args_set=self._args_set,
11198                         background=self._background, binpkg_opts=self._binpkg_opts,
11199                         build_opts=self._build_opts,
11200                         config_pool=self._ConfigPool(pkg.root,
11201                         self._allocate_config, self._deallocate_config),
11202                         emerge_opts=self.myopts,
11203                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11204                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11205                         pkg_to_replace=pkg_to_replace,
11206                         prefetcher=self._prefetchers.get(pkg),
11207                         scheduler=self._sched_iface,
11208                         settings=self._allocate_config(pkg.root),
11209                         statusMessage=self._status_msg,
11210                         world_atom=self._world_atom)
11211
11212                 return task
11213
11214         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11215                 pkg = failed_pkg.pkg
11216                 msg = "%s to %s %s" % \
11217                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11218                 if pkg.root != "/":
11219                         msg += " %s %s" % (preposition, pkg.root)
11220
11221                 log_path = self._locate_failure_log(failed_pkg)
11222                 if log_path is not None:
11223                         msg += ", Log file:"
11224                 self._status_msg(msg)
11225
11226                 if log_path is not None:
11227                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11228
11229         def _status_msg(self, msg):
11230                 """
11231                 Display a brief status message (no newlines) in the status display.
11232                 This is called by tasks to provide feedback to the user. This
11233                 delegates the resposibility of generating \r and \n control characters,
11234                 to guarantee that lines are created or erased when necessary and
11235                 appropriate.
11236
11237                 @type msg: str
11238                 @param msg: a brief status message (no newlines allowed)
11239                 """
11240                 if not self._background:
11241                         writemsg_level("\n")
11242                 self._status_display.displayMessage(msg)
11243
11244         def _save_resume_list(self):
11245                 """
11246                 Do this before verifying the ebuild Manifests since it might
11247                 be possible for the user to use --resume --skipfirst get past
11248                 a non-essential package with a broken digest.
11249                 """
11250                 mtimedb = self._mtimedb
11251                 mtimedb["resume"]["mergelist"] = [list(x) \
11252                         for x in self._mergelist \
11253                         if isinstance(x, Package) and x.operation == "merge"]
11254
11255                 mtimedb.commit()
11256
11257         def _calc_resume_list(self):
11258                 """
11259                 Use the current resume list to calculate a new one,
11260                 dropping any packages with unsatisfied deps.
11261                 @rtype: bool
11262                 @returns: True if successful, False otherwise.
11263                 """
11264                 print colorize("GOOD", "*** Resuming merge...")
11265
11266                 if self._show_list():
11267                         if "--tree" in self.myopts:
11268                                 portage.writemsg_stdout("\n" + \
11269                                         darkgreen("These are the packages that " + \
11270                                         "would be merged, in reverse order:\n\n"))
11271
11272                         else:
11273                                 portage.writemsg_stdout("\n" + \
11274                                         darkgreen("These are the packages that " + \
11275                                         "would be merged, in order:\n\n"))
11276
11277                 show_spinner = "--quiet" not in self.myopts and \
11278                         "--nodeps" not in self.myopts
11279
11280                 if show_spinner:
11281                         print "Calculating dependencies  ",
11282
11283                 myparams = create_depgraph_params(self.myopts, None)
11284                 success = False
11285                 e = None
11286                 try:
11287                         success, mydepgraph, dropped_tasks = resume_depgraph(
11288                                 self.settings, self.trees, self._mtimedb, self.myopts,
11289                                 myparams, self._spinner)
11290                 except depgraph.UnsatisfiedResumeDep, exc:
11291                         # rename variable to avoid python-3.0 error:
11292                         # SyntaxError: can not delete variable 'e' referenced in nested
11293                         #              scope
11294                         e = exc
11295                         mydepgraph = e.depgraph
11296                         dropped_tasks = set()
11297
11298                 if show_spinner:
11299                         print "\b\b... done!"
11300
11301                 if e is not None:
11302                         def unsatisfied_resume_dep_msg():
11303                                 mydepgraph.display_problems()
11304                                 out = portage.output.EOutput()
11305                                 out.eerror("One or more packages are either masked or " + \
11306                                         "have missing dependencies:")
11307                                 out.eerror("")
11308                                 indent = "  "
11309                                 show_parents = set()
11310                                 for dep in e.value:
11311                                         if dep.parent in show_parents:
11312                                                 continue
11313                                         show_parents.add(dep.parent)
11314                                         if dep.atom is None:
11315                                                 out.eerror(indent + "Masked package:")
11316                                                 out.eerror(2 * indent + str(dep.parent))
11317                                                 out.eerror("")
11318                                         else:
11319                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11320                                                 out.eerror(2 * indent + str(dep.parent))
11321                                                 out.eerror("")
11322                                 msg = "The resume list contains packages " + \
11323                                         "that are either masked or have " + \
11324                                         "unsatisfied dependencies. " + \
11325                                         "Please restart/continue " + \
11326                                         "the operation manually, or use --skipfirst " + \
11327                                         "to skip the first package in the list and " + \
11328                                         "any other packages that may be " + \
11329                                         "masked or have missing dependencies."
11330                                 for line in textwrap.wrap(msg, 72):
11331                                         out.eerror(line)
11332                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11333                         return False
11334
11335                 if success and self._show_list():
11336                         mylist = mydepgraph.altlist()
11337                         if mylist:
11338                                 if "--tree" in self.myopts:
11339                                         mylist.reverse()
11340                                 mydepgraph.display(mylist, favorites=self._favorites)
11341
11342                 if not success:
11343                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11344                         return False
11345                 mydepgraph.display_problems()
11346
11347                 mylist = mydepgraph.altlist()
11348                 mydepgraph.break_refs(mylist)
11349                 mydepgraph.break_refs(dropped_tasks)
11350                 self._mergelist = mylist
11351                 self._set_digraph(mydepgraph.schedulerGraph())
11352
11353                 msg_width = 75
11354                 for task in dropped_tasks:
11355                         if not (isinstance(task, Package) and task.operation == "merge"):
11356                                 continue
11357                         pkg = task
11358                         msg = "emerge --keep-going:" + \
11359                                 " %s" % (pkg.cpv,)
11360                         if pkg.root != "/":
11361                                 msg += " for %s" % (pkg.root,)
11362                         msg += " dropped due to unsatisfied dependency."
11363                         for line in textwrap.wrap(msg, msg_width):
11364                                 eerror(line, phase="other", key=pkg.cpv)
11365                         settings = self.pkgsettings[pkg.root]
11366                         # Ensure that log collection from $T is disabled inside
11367                         # elog_process(), since any logs that might exist are
11368                         # not valid here.
11369                         settings.pop("T", None)
11370                         portage.elog.elog_process(pkg.cpv, settings)
11371                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11372
11373                 return True
11374
11375         def _show_list(self):
11376                 myopts = self.myopts
11377                 if "--quiet" not in myopts and \
11378                         ("--ask" in myopts or "--tree" in myopts or \
11379                         "--verbose" in myopts):
11380                         return True
11381                 return False
11382
11383         def _world_atom(self, pkg):
11384                 """
11385                 Add the package to the world file, but only if
11386                 it's supposed to be added. Otherwise, do nothing.
11387                 """
11388
11389                 if set(("--buildpkgonly", "--fetchonly",
11390                         "--fetch-all-uri",
11391                         "--oneshot", "--onlydeps",
11392                         "--pretend")).intersection(self.myopts):
11393                         return
11394
11395                 if pkg.root != self.target_root:
11396                         return
11397
11398                 args_set = self._args_set
11399                 if not args_set.findAtomForPackage(pkg):
11400                         return
11401
11402                 logger = self._logger
11403                 pkg_count = self._pkg_count
11404                 root_config = pkg.root_config
11405                 world_set = root_config.sets["world"]
11406                 world_locked = False
11407                 if hasattr(world_set, "lock"):
11408                         world_set.lock()
11409                         world_locked = True
11410
11411                 try:
11412                         if hasattr(world_set, "load"):
11413                                 world_set.load() # maybe it's changed on disk
11414
11415                         atom = create_world_atom(pkg, args_set, root_config)
11416                         if atom:
11417                                 if hasattr(world_set, "add"):
11418                                         self._status_msg(('Recording %s in "world" ' + \
11419                                                 'favorites file...') % atom)
11420                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11421                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11422                                         world_set.add(atom)
11423                                 else:
11424                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11425                                                 (atom,), level=logging.WARN, noiselevel=-1)
11426                 finally:
11427                         if world_locked:
11428                                 world_set.unlock()
11429
11430         def _pkg(self, cpv, type_name, root_config, installed=False):
11431                 """
11432                 Get a package instance from the cache, or create a new
11433                 one if necessary. Raises KeyError from aux_get if it
11434                 failures for some reason (package does not exist or is
11435                 corrupt).
11436                 """
11437                 operation = "merge"
11438                 if installed:
11439                         operation = "nomerge"
11440
11441                 if self._digraph is not None:
11442                         # Reuse existing instance when available.
11443                         pkg = self._digraph.get(
11444                                 (type_name, root_config.root, cpv, operation))
11445                         if pkg is not None:
11446                                 return pkg
11447
11448                 tree_type = depgraph.pkg_tree_map[type_name]
11449                 db = root_config.trees[tree_type].dbapi
11450                 db_keys = list(self.trees[root_config.root][
11451                         tree_type].dbapi._aux_cache_keys)
11452                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11453                 pkg = Package(cpv=cpv, metadata=metadata,
11454                         root_config=root_config, installed=installed)
11455                 if type_name == "ebuild":
11456                         settings = self.pkgsettings[root_config.root]
11457                         settings.setcpv(pkg)
11458                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11459
11460                 return pkg
11461
11462 class MetadataRegen(PollScheduler):
11463
11464         def __init__(self, portdb, max_jobs=None, max_load=None):
11465                 PollScheduler.__init__(self)
11466                 self._portdb = portdb
11467
11468                 if max_jobs is None:
11469                         max_jobs = 1
11470
11471                 self._max_jobs = max_jobs
11472                 self._max_load = max_load
11473                 self._sched_iface = self._sched_iface_class(
11474                         register=self._register,
11475                         schedule=self._schedule_wait,
11476                         unregister=self._unregister)
11477
11478                 self._valid_pkgs = set()
11479                 self._process_iter = self._iter_metadata_processes()
11480                 self.returncode = os.EX_OK
11481                 self._error_count = 0
11482
11483         def _iter_metadata_processes(self):
11484                 portdb = self._portdb
11485                 valid_pkgs = self._valid_pkgs
11486                 every_cp = portdb.cp_all()
11487                 every_cp.sort(reverse=True)
11488
11489                 while every_cp:
11490                         cp = every_cp.pop()
11491                         portage.writemsg_stdout("Processing %s\n" % cp)
11492                         cpv_list = portdb.cp_list(cp)
11493                         for cpv in cpv_list:
11494                                 valid_pkgs.add(cpv)
11495                                 ebuild_path, repo_path = portdb.findname2(cpv)
11496                                 metadata_process = portdb._metadata_process(
11497                                         cpv, ebuild_path, repo_path)
11498                                 if metadata_process is None:
11499                                         continue
11500                                 yield metadata_process
11501
11502         def run(self):
11503
11504                 portdb = self._portdb
11505                 from portage.cache.cache_errors import CacheError
11506                 dead_nodes = {}
11507
11508                 for mytree in portdb.porttrees:
11509                         try:
11510                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11511                         except CacheError, e:
11512                                 portage.writemsg("Error listing cache entries for " + \
11513                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11514                                 del e
11515                                 dead_nodes = None
11516                                 break
11517
11518                 while self._schedule():
11519                         self._poll_loop()
11520
11521                 while self._jobs:
11522                         self._poll_loop()
11523
11524                 if dead_nodes:
11525                         for y in self._valid_pkgs:
11526                                 for mytree in portdb.porttrees:
11527                                         if portdb.findname2(y, mytree=mytree)[0]:
11528                                                 dead_nodes[mytree].discard(y)
11529
11530                         for mytree, nodes in dead_nodes.iteritems():
11531                                 auxdb = portdb.auxdb[mytree]
11532                                 for y in nodes:
11533                                         try:
11534                                                 del auxdb[y]
11535                                         except (KeyError, CacheError):
11536                                                 pass
11537
11538         def _schedule_tasks(self):
11539                 """
11540                 @rtype: bool
11541                 @returns: True if there may be remaining tasks to schedule,
11542                         False otherwise.
11543                 """
11544                 while self._can_add_job():
11545                         try:
11546                                 metadata_process = self._process_iter.next()
11547                         except StopIteration:
11548                                 return False
11549
11550                         self._jobs += 1
11551                         metadata_process.scheduler = self._sched_iface
11552                         metadata_process.addExitListener(self._metadata_exit)
11553                         metadata_process.start()
11554                 return True
11555
11556         def _metadata_exit(self, metadata_process):
11557                 self._jobs -= 1
11558                 if metadata_process.returncode != os.EX_OK:
11559                         self.returncode = 1
11560                         self._error_count += 1
11561                         self._valid_pkgs.discard(metadata_process.cpv)
11562                         portage.writemsg("Error processing %s, continuing...\n" % \
11563                                 (metadata_process.cpv,))
11564                 self._schedule()
11565
11566 class UninstallFailure(portage.exception.PortageException):
11567         """
11568         An instance of this class is raised by unmerge() when
11569         an uninstallation fails.
11570         """
11571         status = 1
11572         def __init__(self, *pargs):
11573                 portage.exception.PortageException.__init__(self, pargs)
11574                 if pargs:
11575                         self.status = pargs[0]
11576
11577 def unmerge(root_config, myopts, unmerge_action,
11578         unmerge_files, ldpath_mtimes, autoclean=0,
11579         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11580         scheduler=None, writemsg_level=portage.util.writemsg_level):
11581
11582         quiet = "--quiet" in myopts
11583         settings = root_config.settings
11584         sets = root_config.sets
11585         vartree = root_config.trees["vartree"]
11586         candidate_catpkgs=[]
11587         global_unmerge=0
11588         xterm_titles = "notitles" not in settings.features
11589         out = portage.output.EOutput()
11590         pkg_cache = {}
11591         db_keys = list(vartree.dbapi._aux_cache_keys)
11592
11593         def _pkg(cpv):
11594                 pkg = pkg_cache.get(cpv)
11595                 if pkg is None:
11596                         pkg = Package(cpv=cpv, installed=True,
11597                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11598                                 root_config=root_config,
11599                                 type_name="installed")
11600                         pkg_cache[cpv] = pkg
11601                 return pkg
11602
11603         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11604         try:
11605                 # At least the parent needs to exist for the lock file.
11606                 portage.util.ensure_dirs(vdb_path)
11607         except portage.exception.PortageException:
11608                 pass
11609         vdb_lock = None
11610         try:
11611                 if os.access(vdb_path, os.W_OK):
11612                         vdb_lock = portage.locks.lockdir(vdb_path)
11613                 realsyslist = sets["system"].getAtoms()
11614                 syslist = []
11615                 for x in realsyslist:
11616                         mycp = portage.dep_getkey(x)
11617                         if mycp in settings.getvirtuals():
11618                                 providers = []
11619                                 for provider in settings.getvirtuals()[mycp]:
11620                                         if vartree.dbapi.match(provider):
11621                                                 providers.append(provider)
11622                                 if len(providers) == 1:
11623                                         syslist.extend(providers)
11624                         else:
11625                                 syslist.append(mycp)
11626         
11627                 mysettings = portage.config(clone=settings)
11628         
11629                 if not unmerge_files:
11630                         if unmerge_action == "unmerge":
11631                                 print
11632                                 print bold("emerge unmerge") + " can only be used with specific package names"
11633                                 print
11634                                 return 0
11635                         else:
11636                                 global_unmerge = 1
11637         
11638                 localtree = vartree
11639                 # process all arguments and add all
11640                 # valid db entries to candidate_catpkgs
11641                 if global_unmerge:
11642                         if not unmerge_files:
11643                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11644                 else:
11645                         #we've got command-line arguments
11646                         if not unmerge_files:
11647                                 print "\nNo packages to unmerge have been provided.\n"
11648                                 return 0
11649                         for x in unmerge_files:
11650                                 arg_parts = x.split('/')
11651                                 if x[0] not in [".","/"] and \
11652                                         arg_parts[-1][-7:] != ".ebuild":
11653                                         #possible cat/pkg or dep; treat as such
11654                                         candidate_catpkgs.append(x)
11655                                 elif unmerge_action in ["prune","clean"]:
11656                                         print "\n!!! Prune and clean do not accept individual" + \
11657                                                 " ebuilds as arguments;\n    skipping.\n"
11658                                         continue
11659                                 else:
11660                                         # it appears that the user is specifying an installed
11661                                         # ebuild and we're in "unmerge" mode, so it's ok.
11662                                         if not os.path.exists(x):
11663                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11664                                                 return 0
11665         
11666                                         absx   = os.path.abspath(x)
11667                                         sp_absx = absx.split("/")
11668                                         if sp_absx[-1][-7:] == ".ebuild":
11669                                                 del sp_absx[-1]
11670                                                 absx = "/".join(sp_absx)
11671         
11672                                         sp_absx_len = len(sp_absx)
11673         
11674                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11675                                         vdb_len  = len(vdb_path)
11676         
11677                                         sp_vdb     = vdb_path.split("/")
11678                                         sp_vdb_len = len(sp_vdb)
11679         
11680                                         if not os.path.exists(absx+"/CONTENTS"):
11681                                                 print "!!! Not a valid db dir: "+str(absx)
11682                                                 return 0
11683         
11684                                         if sp_absx_len <= sp_vdb_len:
11685                                                 # The Path is shorter... so it can't be inside the vdb.
11686                                                 print sp_absx
11687                                                 print absx
11688                                                 print "\n!!!",x,"cannot be inside "+ \
11689                                                         vdb_path+"; aborting.\n"
11690                                                 return 0
11691         
11692                                         for idx in range(0,sp_vdb_len):
11693                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11694                                                         print sp_absx
11695                                                         print absx
11696                                                         print "\n!!!", x, "is not inside "+\
11697                                                                 vdb_path+"; aborting.\n"
11698                                                         return 0
11699         
11700                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11701                                         candidate_catpkgs.append(
11702                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11703         
11704                 newline=""
11705                 if (not "--quiet" in myopts):
11706                         newline="\n"
11707                 if settings["ROOT"] != "/":
11708                         writemsg_level(darkgreen(newline+ \
11709                                 ">>> Using system located in ROOT tree %s\n" % \
11710                                 settings["ROOT"]))
11711
11712                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11713                         not ("--quiet" in myopts):
11714                         writemsg_level(darkgreen(newline+\
11715                                 ">>> These are the packages that would be unmerged:\n"))
11716
11717                 # Preservation of order is required for --depclean and --prune so
11718                 # that dependencies are respected. Use all_selected to eliminate
11719                 # duplicate packages since the same package may be selected by
11720                 # multiple atoms.
11721                 pkgmap = []
11722                 all_selected = set()
11723                 for x in candidate_catpkgs:
11724                         # cycle through all our candidate deps and determine
11725                         # what will and will not get unmerged
11726                         try:
11727                                 mymatch = vartree.dbapi.match(x)
11728                         except portage.exception.AmbiguousPackageName, errpkgs:
11729                                 print "\n\n!!! The short ebuild name \"" + \
11730                                         x + "\" is ambiguous.  Please specify"
11731                                 print "!!! one of the following fully-qualified " + \
11732                                         "ebuild names instead:\n"
11733                                 for i in errpkgs[0]:
11734                                         print "    " + green(i)
11735                                 print
11736                                 sys.exit(1)
11737         
11738                         if not mymatch and x[0] not in "<>=~":
11739                                 mymatch = localtree.dep_match(x)
11740                         if not mymatch:
11741                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11742                                         (x, unmerge_action), noiselevel=-1)
11743                                 continue
11744
11745                         pkgmap.append(
11746                                 {"protected": set(), "selected": set(), "omitted": set()})
11747                         mykey = len(pkgmap) - 1
11748                         if unmerge_action=="unmerge":
11749                                         for y in mymatch:
11750                                                 if y not in all_selected:
11751                                                         pkgmap[mykey]["selected"].add(y)
11752                                                         all_selected.add(y)
11753                         elif unmerge_action == "prune":
11754                                 if len(mymatch) == 1:
11755                                         continue
11756                                 best_version = mymatch[0]
11757                                 best_slot = vartree.getslot(best_version)
11758                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11759                                 for mypkg in mymatch[1:]:
11760                                         myslot = vartree.getslot(mypkg)
11761                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11762                                         if (myslot == best_slot and mycounter > best_counter) or \
11763                                                 mypkg == portage.best([mypkg, best_version]):
11764                                                 if myslot == best_slot:
11765                                                         if mycounter < best_counter:
11766                                                                 # On slot collision, keep the one with the
11767                                                                 # highest counter since it is the most
11768                                                                 # recently installed.
11769                                                                 continue
11770                                                 best_version = mypkg
11771                                                 best_slot = myslot
11772                                                 best_counter = mycounter
11773                                 pkgmap[mykey]["protected"].add(best_version)
11774                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11775                                         if mypkg != best_version and mypkg not in all_selected)
11776                                 all_selected.update(pkgmap[mykey]["selected"])
11777                         else:
11778                                 # unmerge_action == "clean"
11779                                 slotmap={}
11780                                 for mypkg in mymatch:
11781                                         if unmerge_action == "clean":
11782                                                 myslot = localtree.getslot(mypkg)
11783                                         else:
11784                                                 # since we're pruning, we don't care about slots
11785                                                 # and put all the pkgs in together
11786                                                 myslot = 0
11787                                         if myslot not in slotmap:
11788                                                 slotmap[myslot] = {}
11789                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11790
11791                                 for mypkg in vartree.dbapi.cp_list(
11792                                         portage.dep_getkey(mymatch[0])):
11793                                         myslot = vartree.getslot(mypkg)
11794                                         if myslot not in slotmap:
11795                                                 slotmap[myslot] = {}
11796                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11797
11798                                 for myslot in slotmap:
11799                                         counterkeys = slotmap[myslot].keys()
11800                                         if not counterkeys:
11801                                                 continue
11802                                         counterkeys.sort()
11803                                         pkgmap[mykey]["protected"].add(
11804                                                 slotmap[myslot][counterkeys[-1]])
11805                                         del counterkeys[-1]
11806
11807                                         for counter in counterkeys[:]:
11808                                                 mypkg = slotmap[myslot][counter]
11809                                                 if mypkg not in mymatch:
11810                                                         counterkeys.remove(counter)
11811                                                         pkgmap[mykey]["protected"].add(
11812                                                                 slotmap[myslot][counter])
11813
11814                                         #be pretty and get them in order of merge:
11815                                         for ckey in counterkeys:
11816                                                 mypkg = slotmap[myslot][ckey]
11817                                                 if mypkg not in all_selected:
11818                                                         pkgmap[mykey]["selected"].add(mypkg)
11819                                                         all_selected.add(mypkg)
11820                                         # ok, now the last-merged package
11821                                         # is protected, and the rest are selected
11822                 numselected = len(all_selected)
11823                 if global_unmerge and not numselected:
11824                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11825                         return 0
11826         
11827                 if not numselected:
11828                         portage.writemsg_stdout(
11829                                 "\n>>> No packages selected for removal by " + \
11830                                 unmerge_action + "\n")
11831                         return 0
11832         finally:
11833                 if vdb_lock:
11834                         vartree.dbapi.flush_cache()
11835                         portage.locks.unlockdir(vdb_lock)
11836         
11837         from portage.sets.base import EditablePackageSet
11838         
11839         # generate a list of package sets that are directly or indirectly listed in "world",
11840         # as there is no persistent list of "installed" sets
11841         installed_sets = ["world"]
11842         stop = False
11843         pos = 0
11844         while not stop:
11845                 stop = True
11846                 pos = len(installed_sets)
11847                 for s in installed_sets[pos - 1:]:
11848                         if s not in sets:
11849                                 continue
11850                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11851                         if candidates:
11852                                 stop = False
11853                                 installed_sets += candidates
11854         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11855         del stop, pos
11856
11857         # we don't want to unmerge packages that are still listed in user-editable package sets
11858         # listed in "world" as they would be remerged on the next update of "world" or the 
11859         # relevant package sets.
11860         unknown_sets = set()
11861         for cp in xrange(len(pkgmap)):
11862                 for cpv in pkgmap[cp]["selected"].copy():
11863                         try:
11864                                 pkg = _pkg(cpv)
11865                         except KeyError:
11866                                 # It could have been uninstalled
11867                                 # by a concurrent process.
11868                                 continue
11869
11870                         if unmerge_action != "clean" and \
11871                                 root_config.root == "/" and \
11872                                 portage.match_from_list(
11873                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11874                                 msg = ("Not unmerging package %s since there is no valid " + \
11875                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11876                                 for line in textwrap.wrap(msg, 75):
11877                                         out.eerror(line)
11878                                 # adjust pkgmap so the display output is correct
11879                                 pkgmap[cp]["selected"].remove(cpv)
11880                                 all_selected.remove(cpv)
11881                                 pkgmap[cp]["protected"].add(cpv)
11882                                 continue
11883
11884                         parents = []
11885                         for s in installed_sets:
11886                                 # skip sets that the user requested to unmerge, and skip world 
11887                                 # unless we're unmerging a package set (as the package would be 
11888                                 # removed from "world" later on)
11889                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11890                                         continue
11891
11892                                 if s not in sets:
11893                                         if s in unknown_sets:
11894                                                 continue
11895                                         unknown_sets.add(s)
11896                                         out = portage.output.EOutput()
11897                                         out.eerror(("Unknown set '@%s' in " + \
11898                                                 "%svar/lib/portage/world_sets") % \
11899                                                 (s, root_config.root))
11900                                         continue
11901
11902                                 # only check instances of EditablePackageSet as other classes are generally used for
11903                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11904                                 # user can't do much about them anyway)
11905                                 if isinstance(sets[s], EditablePackageSet):
11906
11907                                         # This is derived from a snippet of code in the
11908                                         # depgraph._iter_atoms_for_pkg() method.
11909                                         for atom in sets[s].iterAtomsForPackage(pkg):
11910                                                 inst_matches = vartree.dbapi.match(atom)
11911                                                 inst_matches.reverse() # descending order
11912                                                 higher_slot = None
11913                                                 for inst_cpv in inst_matches:
11914                                                         try:
11915                                                                 inst_pkg = _pkg(inst_cpv)
11916                                                         except KeyError:
11917                                                                 # It could have been uninstalled
11918                                                                 # by a concurrent process.
11919                                                                 continue
11920
11921                                                         if inst_pkg.cp != atom.cp:
11922                                                                 continue
11923                                                         if pkg >= inst_pkg:
11924                                                                 # This is descending order, and we're not
11925                                                                 # interested in any versions <= pkg given.
11926                                                                 break
11927                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11928                                                                 higher_slot = inst_pkg
11929                                                                 break
11930                                                 if higher_slot is None:
11931                                                         parents.append(s)
11932                                                         break
11933                         if parents:
11934                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11935                                 #print colorize("WARN", "but still listed in the following package sets:")
11936                                 #print "    %s\n" % ", ".join(parents)
11937                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11938                                 print colorize("WARN", "still referenced by the following package sets:")
11939                                 print "    %s\n" % ", ".join(parents)
11940                                 # adjust pkgmap so the display output is correct
11941                                 pkgmap[cp]["selected"].remove(cpv)
11942                                 all_selected.remove(cpv)
11943                                 pkgmap[cp]["protected"].add(cpv)
11944         
11945         del installed_sets
11946
11947         numselected = len(all_selected)
11948         if not numselected:
11949                 writemsg_level(
11950                         "\n>>> No packages selected for removal by " + \
11951                         unmerge_action + "\n")
11952                 return 0
11953
11954         # Unmerge order only matters in some cases
11955         if not ordered:
11956                 unordered = {}
11957                 for d in pkgmap:
11958                         selected = d["selected"]
11959                         if not selected:
11960                                 continue
11961                         cp = portage.cpv_getkey(iter(selected).next())
11962                         cp_dict = unordered.get(cp)
11963                         if cp_dict is None:
11964                                 cp_dict = {}
11965                                 unordered[cp] = cp_dict
11966                                 for k in d:
11967                                         cp_dict[k] = set()
11968                         for k, v in d.iteritems():
11969                                 cp_dict[k].update(v)
11970                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11971
11972         for x in xrange(len(pkgmap)):
11973                 selected = pkgmap[x]["selected"]
11974                 if not selected:
11975                         continue
11976                 for mytype, mylist in pkgmap[x].iteritems():
11977                         if mytype == "selected":
11978                                 continue
11979                         mylist.difference_update(all_selected)
11980                 cp = portage.cpv_getkey(iter(selected).next())
11981                 for y in localtree.dep_match(cp):
11982                         if y not in pkgmap[x]["omitted"] and \
11983                                 y not in pkgmap[x]["selected"] and \
11984                                 y not in pkgmap[x]["protected"] and \
11985                                 y not in all_selected:
11986                                 pkgmap[x]["omitted"].add(y)
11987                 if global_unmerge and not pkgmap[x]["selected"]:
11988                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11989                         continue
11990                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11991                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11992                                 "'%s' is part of your system profile.\n" % cp),
11993                                 level=logging.WARNING, noiselevel=-1)
11994                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11995                                 "be damaging to your system.\n\n"),
11996                                 level=logging.WARNING, noiselevel=-1)
11997                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11998                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11999                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12000                 if not quiet:
12001                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12002                 else:
12003                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12004                 for mytype in ["selected","protected","omitted"]:
12005                         if not quiet:
12006                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12007                         if pkgmap[x][mytype]:
12008                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12009                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12010                                 for pn, ver, rev in sorted_pkgs:
12011                                         if rev == "r0":
12012                                                 myversion = ver
12013                                         else:
12014                                                 myversion = ver + "-" + rev
12015                                         if mytype == "selected":
12016                                                 writemsg_level(
12017                                                         colorize("UNMERGE_WARN", myversion + " "),
12018                                                         noiselevel=-1)
12019                                         else:
12020                                                 writemsg_level(
12021                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12022                         else:
12023                                 writemsg_level("none ", noiselevel=-1)
12024                         if not quiet:
12025                                 writemsg_level("\n", noiselevel=-1)
12026                 if quiet:
12027                         writemsg_level("\n", noiselevel=-1)
12028
12029         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12030                 " packages are slated for removal.\n")
12031         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12032                         " and " + colorize("GOOD", "'omitted'") + \
12033                         " packages will not be removed.\n\n")
12034
12035         if "--pretend" in myopts:
12036                 #we're done... return
12037                 return 0
12038         if "--ask" in myopts:
12039                 if userquery("Would you like to unmerge these packages?")=="No":
12040                         # enter pretend mode for correct formatting of results
12041                         myopts["--pretend"] = True
12042                         print
12043                         print "Quitting."
12044                         print
12045                         return 0
12046         #the real unmerging begins, after a short delay....
12047         if clean_delay and not autoclean:
12048                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12049
12050         for x in xrange(len(pkgmap)):
12051                 for y in pkgmap[x]["selected"]:
12052                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12053                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12054                         mysplit = y.split("/")
12055                         #unmerge...
12056                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12057                                 mysettings, unmerge_action not in ["clean","prune"],
12058                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12059                                 scheduler=scheduler)
12060
12061                         if retval != os.EX_OK:
12062                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12063                                 if raise_on_error:
12064                                         raise UninstallFailure(retval)
12065                                 sys.exit(retval)
12066                         else:
12067                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12068                                         sets["world"].cleanPackage(vartree.dbapi, y)
12069                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12070         if clean_world and hasattr(sets["world"], "remove"):
12071                 for s in root_config.setconfig.active:
12072                         sets["world"].remove(SETPREFIX+s)
12073         return 1
12074
12075 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12076
12077         if os.path.exists("/usr/bin/install-info"):
12078                 out = portage.output.EOutput()
12079                 regen_infodirs=[]
12080                 for z in infodirs:
12081                         if z=='':
12082                                 continue
12083                         inforoot=normpath(root+z)
12084                         if os.path.isdir(inforoot):
12085                                 infomtime = long(os.stat(inforoot).st_mtime)
12086                                 if inforoot not in prev_mtimes or \
12087                                         prev_mtimes[inforoot] != infomtime:
12088                                                 regen_infodirs.append(inforoot)
12089
12090                 if not regen_infodirs:
12091                         portage.writemsg_stdout("\n")
12092                         out.einfo("GNU info directory index is up-to-date.")
12093                 else:
12094                         portage.writemsg_stdout("\n")
12095                         out.einfo("Regenerating GNU info directory index...")
12096
12097                         dir_extensions = ("", ".gz", ".bz2")
12098                         icount=0
12099                         badcount=0
12100                         errmsg = ""
12101                         for inforoot in regen_infodirs:
12102                                 if inforoot=='':
12103                                         continue
12104
12105                                 if not os.path.isdir(inforoot) or \
12106                                         not os.access(inforoot, os.W_OK):
12107                                         continue
12108
12109                                 file_list = os.listdir(inforoot)
12110                                 file_list.sort()
12111                                 dir_file = os.path.join(inforoot, "dir")
12112                                 moved_old_dir = False
12113                                 processed_count = 0
12114                                 for x in file_list:
12115                                         if x.startswith(".") or \
12116                                                 os.path.isdir(os.path.join(inforoot, x)):
12117                                                 continue
12118                                         if x.startswith("dir"):
12119                                                 skip = False
12120                                                 for ext in dir_extensions:
12121                                                         if x == "dir" + ext or \
12122                                                                 x == "dir" + ext + ".old":
12123                                                                 skip = True
12124                                                                 break
12125                                                 if skip:
12126                                                         continue
12127                                         if processed_count == 0:
12128                                                 for ext in dir_extensions:
12129                                                         try:
12130                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12131                                                                 moved_old_dir = True
12132                                                         except EnvironmentError, e:
12133                                                                 if e.errno != errno.ENOENT:
12134                                                                         raise
12135                                                                 del e
12136                                         processed_count += 1
12137                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12138                                         existsstr="already exists, for file `"
12139                                         if myso!="":
12140                                                 if re.search(existsstr,myso):
12141                                                         # Already exists... Don't increment the count for this.
12142                                                         pass
12143                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12144                                                         # This info file doesn't contain a DIR-header: install-info produces this
12145                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12146                                                         # Don't increment the count for this.
12147                                                         pass
12148                                                 else:
12149                                                         badcount=badcount+1
12150                                                         errmsg += myso + "\n"
12151                                         icount=icount+1
12152
12153                                 if moved_old_dir and not os.path.exists(dir_file):
12154                                         # We didn't generate a new dir file, so put the old file
12155                                         # back where it was originally found.
12156                                         for ext in dir_extensions:
12157                                                 try:
12158                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12159                                                 except EnvironmentError, e:
12160                                                         if e.errno != errno.ENOENT:
12161                                                                 raise
12162                                                         del e
12163
12164                                 # Clean dir.old cruft so that they don't prevent
12165                                 # unmerge of otherwise empty directories.
12166                                 for ext in dir_extensions:
12167                                         try:
12168                                                 os.unlink(dir_file + ext + ".old")
12169                                         except EnvironmentError, e:
12170                                                 if e.errno != errno.ENOENT:
12171                                                         raise
12172                                                 del e
12173
12174                                 #update mtime so we can potentially avoid regenerating.
12175                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12176
12177                         if badcount:
12178                                 out.eerror("Processed %d info files; %d errors." % \
12179                                         (icount, badcount))
12180                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12181                         else:
12182                                 if icount > 0:
12183                                         out.einfo("Processed %d info files." % (icount,))
12184
12185
12186 def display_news_notification(root_config, myopts):
12187         target_root = root_config.root
12188         trees = root_config.trees
12189         settings = trees["vartree"].settings
12190         portdb = trees["porttree"].dbapi
12191         vardb = trees["vartree"].dbapi
12192         NEWS_PATH = os.path.join("metadata", "news")
12193         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12194         newsReaderDisplay = False
12195         update = "--pretend" not in myopts
12196
12197         for repo in portdb.getRepositories():
12198                 unreadItems = checkUpdatedNewsItems(
12199                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12200                 if unreadItems:
12201                         if not newsReaderDisplay:
12202                                 newsReaderDisplay = True
12203                                 print
12204                         print colorize("WARN", " * IMPORTANT:"),
12205                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12206                         
12207         
12208         if newsReaderDisplay:
12209                 print colorize("WARN", " *"),
12210                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12211                 print
12212
12213 def display_preserved_libs(vardbapi):
12214         MAX_DISPLAY = 3
12215
12216         # Ensure the registry is consistent with existing files.
12217         vardbapi.plib_registry.pruneNonExisting()
12218
12219         if vardbapi.plib_registry.hasEntries():
12220                 print
12221                 print colorize("WARN", "!!!") + " existing preserved libs:"
12222                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12223                 linkmap = vardbapi.linkmap
12224                 consumer_map = {}
12225                 owners = {}
12226                 linkmap_broken = False
12227
12228                 try:
12229                         linkmap.rebuild()
12230                 except portage.exception.CommandNotFound, e:
12231                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12232                                 level=logging.ERROR, noiselevel=-1)
12233                         del e
12234                         linkmap_broken = True
12235                 else:
12236                         search_for_owners = set()
12237                         for cpv in plibdata:
12238                                 internal_plib_keys = set(linkmap._obj_key(f) \
12239                                         for f in plibdata[cpv])
12240                                 for f in plibdata[cpv]:
12241                                         if f in consumer_map:
12242                                                 continue
12243                                         consumers = []
12244                                         for c in linkmap.findConsumers(f):
12245                                                 # Filter out any consumers that are also preserved libs
12246                                                 # belonging to the same package as the provider.
12247                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12248                                                         consumers.append(c)
12249                                         consumers.sort()
12250                                         consumer_map[f] = consumers
12251                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12252
12253                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12254
12255                 for cpv in plibdata:
12256                         print colorize("WARN", ">>>") + " package: %s" % cpv
12257                         samefile_map = {}
12258                         for f in plibdata[cpv]:
12259                                 obj_key = linkmap._obj_key(f)
12260                                 alt_paths = samefile_map.get(obj_key)
12261                                 if alt_paths is None:
12262                                         alt_paths = set()
12263                                         samefile_map[obj_key] = alt_paths
12264                                 alt_paths.add(f)
12265
12266                         for alt_paths in samefile_map.itervalues():
12267                                 alt_paths = sorted(alt_paths)
12268                                 for p in alt_paths:
12269                                         print colorize("WARN", " * ") + " - %s" % (p,)
12270                                 f = alt_paths[0]
12271                                 consumers = consumer_map.get(f, [])
12272                                 for c in consumers[:MAX_DISPLAY]:
12273                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12274                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12275                                 if len(consumers) == MAX_DISPLAY + 1:
12276                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12277                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12278                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12279                                 elif len(consumers) > MAX_DISPLAY:
12280                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12281                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12282
12283
12284 def _flush_elog_mod_echo():
12285         """
12286         Dump the mod_echo output now so that our other
12287         notifications are shown last.
12288         @rtype: bool
12289         @returns: True if messages were shown, False otherwise.
12290         """
12291         messages_shown = False
12292         try:
12293                 from portage.elog import mod_echo
12294         except ImportError:
12295                 pass # happens during downgrade to a version without the module
12296         else:
12297                 messages_shown = bool(mod_echo._items)
12298                 mod_echo.finalize()
12299         return messages_shown
12300
12301 def post_emerge(root_config, myopts, mtimedb, retval):
12302         """
12303         Misc. things to run at the end of a merge session.
12304         
12305         Update Info Files
12306         Update Config Files
12307         Update News Items
12308         Commit mtimeDB
12309         Display preserved libs warnings
12310         Exit Emerge
12311
12312         @param trees: A dictionary mapping each ROOT to it's package databases
12313         @type trees: dict
12314         @param mtimedb: The mtimeDB to store data needed across merge invocations
12315         @type mtimedb: MtimeDB class instance
12316         @param retval: Emerge's return value
12317         @type retval: Int
12318         @rype: None
12319         @returns:
12320         1.  Calls sys.exit(retval)
12321         """
12322
12323         target_root = root_config.root
12324         trees = { target_root : root_config.trees }
12325         vardbapi = trees[target_root]["vartree"].dbapi
12326         settings = vardbapi.settings
12327         info_mtimes = mtimedb["info"]
12328
12329         # Load the most current variables from ${ROOT}/etc/profile.env
12330         settings.unlock()
12331         settings.reload()
12332         settings.regenerate()
12333         settings.lock()
12334
12335         config_protect = settings.get("CONFIG_PROTECT","").split()
12336         infodirs = settings.get("INFOPATH","").split(":") + \
12337                 settings.get("INFODIR","").split(":")
12338
12339         os.chdir("/")
12340
12341         if retval == os.EX_OK:
12342                 exit_msg = " *** exiting successfully."
12343         else:
12344                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12345         emergelog("notitles" not in settings.features, exit_msg)
12346
12347         _flush_elog_mod_echo()
12348
12349         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12350         if "--pretend" in myopts or (counter_hash is not None and \
12351                 counter_hash == vardbapi._counter_hash()):
12352                 display_news_notification(root_config, myopts)
12353                 # If vdb state has not changed then there's nothing else to do.
12354                 sys.exit(retval)
12355
12356         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12357         portage.util.ensure_dirs(vdb_path)
12358         vdb_lock = None
12359         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12360                 vdb_lock = portage.locks.lockdir(vdb_path)
12361
12362         if vdb_lock:
12363                 try:
12364                         if "noinfo" not in settings.features:
12365                                 chk_updated_info_files(target_root,
12366                                         infodirs, info_mtimes, retval)
12367                         mtimedb.commit()
12368                 finally:
12369                         if vdb_lock:
12370                                 portage.locks.unlockdir(vdb_lock)
12371
12372         chk_updated_cfg_files(target_root, config_protect)
12373         
12374         display_news_notification(root_config, myopts)
12375         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12376                 display_preserved_libs(vardbapi)        
12377
12378         sys.exit(retval)
12379
12380
12381 def chk_updated_cfg_files(target_root, config_protect):
12382         if config_protect:
12383                 #number of directories with some protect files in them
12384                 procount=0
12385                 for x in config_protect:
12386                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12387                         if not os.access(x, os.W_OK):
12388                                 # Avoid Permission denied errors generated
12389                                 # later by `find`.
12390                                 continue
12391                         try:
12392                                 mymode = os.lstat(x).st_mode
12393                         except OSError:
12394                                 continue
12395                         if stat.S_ISLNK(mymode):
12396                                 # We want to treat it like a directory if it
12397                                 # is a symlink to an existing directory.
12398                                 try:
12399                                         real_mode = os.stat(x).st_mode
12400                                         if stat.S_ISDIR(real_mode):
12401                                                 mymode = real_mode
12402                                 except OSError:
12403                                         pass
12404                         if stat.S_ISDIR(mymode):
12405                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12406                         else:
12407                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12408                                         os.path.split(x.rstrip(os.path.sep))
12409                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12410                         a = commands.getstatusoutput(mycommand)
12411                         if a[0] != 0:
12412                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12413                                 sys.stderr.flush()
12414                                 # Show the error message alone, sending stdout to /dev/null.
12415                                 os.system(mycommand + " 1>/dev/null")
12416                         else:
12417                                 files = a[1].split('\0')
12418                                 # split always produces an empty string as the last element
12419                                 if files and not files[-1]:
12420                                         del files[-1]
12421                                 if files:
12422                                         procount += 1
12423                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12424                                         if stat.S_ISDIR(mymode):
12425                                                  print "%d config files in '%s' need updating." % \
12426                                                         (len(files), x)
12427                                         else:
12428                                                  print "config file '%s' needs updating." % x
12429
12430                 if procount:
12431                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12432                                 " section of the " + bold("emerge")
12433                         print " "+yellow("*")+" man page to learn how to update config files."
12434
12435 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12436         update=False):
12437         """
12438         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12439         Returns the number of unread (yet relevent) items.
12440         
12441         @param portdb: a portage tree database
12442         @type portdb: pordbapi
12443         @param vardb: an installed package database
12444         @type vardb: vardbapi
12445         @param NEWS_PATH:
12446         @type NEWS_PATH:
12447         @param UNREAD_PATH:
12448         @type UNREAD_PATH:
12449         @param repo_id:
12450         @type repo_id:
12451         @rtype: Integer
12452         @returns:
12453         1.  The number of unread but relevant news items.
12454         
12455         """
12456         from portage.news import NewsManager
12457         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12458         return manager.getUnreadItems( repo_id, update=update )
12459
12460 def insert_category_into_atom(atom, category):
12461         alphanum = re.search(r'\w', atom)
12462         if alphanum:
12463                 ret = atom[:alphanum.start()] + "%s/" % category + \
12464                         atom[alphanum.start():]
12465         else:
12466                 ret = None
12467         return ret
12468
12469 def is_valid_package_atom(x):
12470         if "/" not in x:
12471                 alphanum = re.search(r'\w', x)
12472                 if alphanum:
12473                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12474         return portage.isvalidatom(x)
12475
12476 def show_blocker_docs_link():
12477         print
12478         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12479         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12480         print
12481         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12482         print
12483
12484 def show_mask_docs():
12485         print "For more information, see the MASKED PACKAGES section in the emerge"
12486         print "man page or refer to the Gentoo Handbook."
12487
12488 def action_sync(settings, trees, mtimedb, myopts, myaction):
12489         xterm_titles = "notitles" not in settings.features
12490         emergelog(xterm_titles, " === sync")
12491         myportdir = settings.get("PORTDIR", None)
12492         out = portage.output.EOutput()
12493         if not myportdir:
12494                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12495                 sys.exit(1)
12496         if myportdir[-1]=="/":
12497                 myportdir=myportdir[:-1]
12498         try:
12499                 st = os.stat(myportdir)
12500         except OSError:
12501                 st = None
12502         if st is None:
12503                 print ">>>",myportdir,"not found, creating it."
12504                 os.makedirs(myportdir,0755)
12505                 st = os.stat(myportdir)
12506
12507         spawn_kwargs = {}
12508         spawn_kwargs["env"] = settings.environ()
12509         if 'usersync' in settings.features and \
12510                 portage.data.secpass >= 2 and \
12511                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12512                 st.st_gid != os.getgid() and st.st_mode & 0070):
12513                 try:
12514                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12515                 except KeyError:
12516                         pass
12517                 else:
12518                         # Drop privileges when syncing, in order to match
12519                         # existing uid/gid settings.
12520                         spawn_kwargs["uid"]    = st.st_uid
12521                         spawn_kwargs["gid"]    = st.st_gid
12522                         spawn_kwargs["groups"] = [st.st_gid]
12523                         spawn_kwargs["env"]["HOME"] = homedir
12524                         umask = 0002
12525                         if not st.st_mode & 0020:
12526                                 umask = umask | 0020
12527                         spawn_kwargs["umask"] = umask
12528
12529         syncuri = settings.get("SYNC", "").strip()
12530         if not syncuri:
12531                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12532                         noiselevel=-1, level=logging.ERROR)
12533                 return 1
12534
12535         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12536         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12537
12538         os.umask(0022)
12539         dosyncuri = syncuri
12540         updatecache_flg = False
12541         if myaction == "metadata":
12542                 print "skipping sync"
12543                 updatecache_flg = True
12544         elif ".git" in vcs_dirs:
12545                 # Update existing git repository, and ignore the syncuri. We are
12546                 # going to trust the user and assume that the user is in the branch
12547                 # that he/she wants updated. We'll let the user manage branches with
12548                 # git directly.
12549                 if portage.process.find_binary("git") is None:
12550                         msg = ["Command not found: git",
12551                         "Type \"emerge dev-util/git\" to enable git support."]
12552                         for l in msg:
12553                                 writemsg_level("!!! %s\n" % l,
12554                                         level=logging.ERROR, noiselevel=-1)
12555                         return 1
12556                 msg = ">>> Starting git pull in %s..." % myportdir
12557                 emergelog(xterm_titles, msg )
12558                 writemsg_level(msg + "\n")
12559                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12560                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12561                 if exitcode != os.EX_OK:
12562                         msg = "!!! git pull error in %s." % myportdir
12563                         emergelog(xterm_titles, msg)
12564                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12565                         return exitcode
12566                 msg = ">>> Git pull in %s successful" % myportdir
12567                 emergelog(xterm_titles, msg)
12568                 writemsg_level(msg + "\n")
12569                 exitcode = git_sync_timestamps(settings, myportdir)
12570                 if exitcode == os.EX_OK:
12571                         updatecache_flg = True
12572         elif syncuri[:8]=="rsync://":
12573                 for vcs_dir in vcs_dirs:
12574                         writemsg_level(("!!! %s appears to be under revision " + \
12575                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12576                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12577                         return 1
12578                 if not os.path.exists("/usr/bin/rsync"):
12579                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12580                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12581                         sys.exit(1)
12582                 mytimeout=180
12583
12584                 rsync_opts = []
12585                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12586                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12587                         rsync_opts.extend([
12588                                 "--recursive",    # Recurse directories
12589                                 "--links",        # Consider symlinks
12590                                 "--safe-links",   # Ignore links outside of tree
12591                                 "--perms",        # Preserve permissions
12592                                 "--times",        # Preserive mod times
12593                                 "--compress",     # Compress the data transmitted
12594                                 "--force",        # Force deletion on non-empty dirs
12595                                 "--whole-file",   # Don't do block transfers, only entire files
12596                                 "--delete",       # Delete files that aren't in the master tree
12597                                 "--stats",        # Show final statistics about what was transfered
12598                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12599                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12600                                 "--exclude=/local",       # Exclude local     from consideration
12601                                 "--exclude=/packages",    # Exclude packages  from consideration
12602                         ])
12603
12604                 else:
12605                         # The below validation is not needed when using the above hardcoded
12606                         # defaults.
12607
12608                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12609                         rsync_opts.extend(
12610                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12611                         for opt in ("--recursive", "--times"):
12612                                 if opt not in rsync_opts:
12613                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12614                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12615                                         rsync_opts.append(opt)
12616         
12617                         for exclude in ("distfiles", "local", "packages"):
12618                                 opt = "--exclude=/%s" % exclude
12619                                 if opt not in rsync_opts:
12620                                         portage.writemsg(yellow("WARNING:") + \
12621                                         " adding required option %s not included in "  % opt + \
12622                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12623                                         rsync_opts.append(opt)
12624         
12625                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12626                                 def rsync_opt_startswith(opt_prefix):
12627                                         for x in rsync_opts:
12628                                                 if x.startswith(opt_prefix):
12629                                                         return True
12630                                         return False
12631
12632                                 if not rsync_opt_startswith("--timeout="):
12633                                         rsync_opts.append("--timeout=%d" % mytimeout)
12634
12635                                 for opt in ("--compress", "--whole-file"):
12636                                         if opt not in rsync_opts:
12637                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12638                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12639                                                 rsync_opts.append(opt)
12640
12641                 if "--quiet" in myopts:
12642                         rsync_opts.append("--quiet")    # Shut up a lot
12643                 else:
12644                         rsync_opts.append("--verbose")  # Print filelist
12645
12646                 if "--verbose" in myopts:
12647                         rsync_opts.append("--progress")  # Progress meter for each file
12648
12649                 if "--debug" in myopts:
12650                         rsync_opts.append("--checksum") # Force checksum on all files
12651
12652                 # Real local timestamp file.
12653                 servertimestampfile = os.path.join(
12654                         myportdir, "metadata", "timestamp.chk")
12655
12656                 content = portage.util.grabfile(servertimestampfile)
12657                 mytimestamp = 0
12658                 if content:
12659                         try:
12660                                 mytimestamp = time.mktime(time.strptime(content[0],
12661                                         "%a, %d %b %Y %H:%M:%S +0000"))
12662                         except (OverflowError, ValueError):
12663                                 pass
12664                 del content
12665
12666                 try:
12667                         rsync_initial_timeout = \
12668                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12669                 except ValueError:
12670                         rsync_initial_timeout = 15
12671
12672                 try:
12673                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12674                 except SystemExit, e:
12675                         raise # Needed else can't exit
12676                 except:
12677                         maxretries=3 #default number of retries
12678
12679                 retries=0
12680                 user_name, hostname, port = re.split(
12681                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12682                 if port is None:
12683                         port=""
12684                 if user_name is None:
12685                         user_name=""
12686                 updatecache_flg=True
12687                 all_rsync_opts = set(rsync_opts)
12688                 extra_rsync_opts = shlex.split(
12689                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12690                 all_rsync_opts.update(extra_rsync_opts)
12691                 family = socket.AF_INET
12692                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12693                         family = socket.AF_INET
12694                 elif socket.has_ipv6 and \
12695                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12696                         family = socket.AF_INET6
12697                 ips=[]
12698                 SERVER_OUT_OF_DATE = -1
12699                 EXCEEDED_MAX_RETRIES = -2
12700                 while (1):
12701                         if ips:
12702                                 del ips[0]
12703                         if ips==[]:
12704                                 try:
12705                                         for addrinfo in socket.getaddrinfo(
12706                                                 hostname, None, family, socket.SOCK_STREAM):
12707                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12708                                                         # IPv6 addresses need to be enclosed in square brackets
12709                                                         ips.append("[%s]" % addrinfo[4][0])
12710                                                 else:
12711                                                         ips.append(addrinfo[4][0])
12712                                         from random import shuffle
12713                                         shuffle(ips)
12714                                 except SystemExit, e:
12715                                         raise # Needed else can't exit
12716                                 except Exception, e:
12717                                         print "Notice:",str(e)
12718                                         dosyncuri=syncuri
12719
12720                         if ips:
12721                                 try:
12722                                         dosyncuri = syncuri.replace(
12723                                                 "//" + user_name + hostname + port + "/",
12724                                                 "//" + user_name + ips[0] + port + "/", 1)
12725                                 except SystemExit, e:
12726                                         raise # Needed else can't exit
12727                                 except Exception, e:
12728                                         print "Notice:",str(e)
12729                                         dosyncuri=syncuri
12730
12731                         if (retries==0):
12732                                 if "--ask" in myopts:
12733                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12734                                                 print
12735                                                 print "Quitting."
12736                                                 print
12737                                                 sys.exit(0)
12738                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12739                                 if "--quiet" not in myopts:
12740                                         print ">>> Starting rsync with "+dosyncuri+"..."
12741                         else:
12742                                 emergelog(xterm_titles,
12743                                         ">>> Starting retry %d of %d with %s" % \
12744                                                 (retries,maxretries,dosyncuri))
12745                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12746
12747                         if mytimestamp != 0 and "--quiet" not in myopts:
12748                                 print ">>> Checking server timestamp ..."
12749
12750                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12751
12752                         if "--debug" in myopts:
12753                                 print rsynccommand
12754
12755                         exitcode = os.EX_OK
12756                         servertimestamp = 0
12757                         # Even if there's no timestamp available locally, fetch the
12758                         # timestamp anyway as an initial probe to verify that the server is
12759                         # responsive.  This protects us from hanging indefinitely on a
12760                         # connection attempt to an unresponsive server which rsync's
12761                         # --timeout option does not prevent.
12762                         if True:
12763                                 # Temporary file for remote server timestamp comparison.
12764                                 from tempfile import mkstemp
12765                                 fd, tmpservertimestampfile = mkstemp()
12766                                 os.close(fd)
12767                                 mycommand = rsynccommand[:]
12768                                 mycommand.append(dosyncuri.rstrip("/") + \
12769                                         "/metadata/timestamp.chk")
12770                                 mycommand.append(tmpservertimestampfile)
12771                                 content = None
12772                                 mypids = []
12773                                 try:
12774                                         def timeout_handler(signum, frame):
12775                                                 raise portage.exception.PortageException("timed out")
12776                                         signal.signal(signal.SIGALRM, timeout_handler)
12777                                         # Timeout here in case the server is unresponsive.  The
12778                                         # --timeout rsync option doesn't apply to the initial
12779                                         # connection attempt.
12780                                         if rsync_initial_timeout:
12781                                                 signal.alarm(rsync_initial_timeout)
12782                                         try:
12783                                                 mypids.extend(portage.process.spawn(
12784                                                         mycommand, env=settings.environ(), returnpid=True))
12785                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12786                                                 content = portage.grabfile(tmpservertimestampfile)
12787                                         finally:
12788                                                 if rsync_initial_timeout:
12789                                                         signal.alarm(0)
12790                                                 try:
12791                                                         os.unlink(tmpservertimestampfile)
12792                                                 except OSError:
12793                                                         pass
12794                                 except portage.exception.PortageException, e:
12795                                         # timed out
12796                                         print e
12797                                         del e
12798                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12799                                                 os.kill(mypids[0], signal.SIGTERM)
12800                                                 os.waitpid(mypids[0], 0)
12801                                         # This is the same code rsync uses for timeout.
12802                                         exitcode = 30
12803                                 else:
12804                                         if exitcode != os.EX_OK:
12805                                                 if exitcode & 0xff:
12806                                                         exitcode = (exitcode & 0xff) << 8
12807                                                 else:
12808                                                         exitcode = exitcode >> 8
12809                                 if mypids:
12810                                         portage.process.spawned_pids.remove(mypids[0])
12811                                 if content:
12812                                         try:
12813                                                 servertimestamp = time.mktime(time.strptime(
12814                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12815                                         except (OverflowError, ValueError):
12816                                                 pass
12817                                 del mycommand, mypids, content
12818                         if exitcode == os.EX_OK:
12819                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12820                                         emergelog(xterm_titles,
12821                                                 ">>> Cancelling sync -- Already current.")
12822                                         print
12823                                         print ">>>"
12824                                         print ">>> Timestamps on the server and in the local repository are the same."
12825                                         print ">>> Cancelling all further sync action. You are already up to date."
12826                                         print ">>>"
12827                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12828                                         print ">>>"
12829                                         print
12830                                         sys.exit(0)
12831                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12832                                         emergelog(xterm_titles,
12833                                                 ">>> Server out of date: %s" % dosyncuri)
12834                                         print
12835                                         print ">>>"
12836                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12837                                         print ">>>"
12838                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12839                                         print ">>>"
12840                                         print
12841                                         exitcode = SERVER_OUT_OF_DATE
12842                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12843                                         # actual sync
12844                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12845                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12846                                         if exitcode in [0,1,3,4,11,14,20,21]:
12847                                                 break
12848                         elif exitcode in [1,3,4,11,14,20,21]:
12849                                 break
12850                         else:
12851                                 # Code 2 indicates protocol incompatibility, which is expected
12852                                 # for servers with protocol < 29 that don't support
12853                                 # --prune-empty-directories.  Retry for a server that supports
12854                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12855                                 pass
12856
12857                         retries=retries+1
12858
12859                         if retries<=maxretries:
12860                                 print ">>> Retrying..."
12861                                 time.sleep(11)
12862                         else:
12863                                 # over retries
12864                                 # exit loop
12865                                 updatecache_flg=False
12866                                 exitcode = EXCEEDED_MAX_RETRIES
12867                                 break
12868
12869                 if (exitcode==0):
12870                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12871                 elif exitcode == SERVER_OUT_OF_DATE:
12872                         sys.exit(1)
12873                 elif exitcode == EXCEEDED_MAX_RETRIES:
12874                         sys.stderr.write(
12875                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12876                         sys.exit(1)
12877                 elif (exitcode>0):
12878                         msg = []
12879                         if exitcode==1:
12880                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12881                                 msg.append("that your SYNC statement is proper.")
12882                                 msg.append("SYNC=" + settings["SYNC"])
12883                         elif exitcode==11:
12884                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12885                                 msg.append("this means your disk is full, but can be caused by corruption")
12886                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12887                                 msg.append("and try again after the problem has been fixed.")
12888                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12889                         elif exitcode==20:
12890                                 msg.append("Rsync was killed before it finished.")
12891                         else:
12892                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12893                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12894                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12895                                 msg.append("temporary problem unless complications exist with your network")
12896                                 msg.append("(and possibly your system's filesystem) configuration.")
12897                         for line in msg:
12898                                 out.eerror(line)
12899                         sys.exit(exitcode)
12900         elif syncuri[:6]=="cvs://":
12901                 if not os.path.exists("/usr/bin/cvs"):
12902                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12903                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12904                         sys.exit(1)
12905                 cvsroot=syncuri[6:]
12906                 cvsdir=os.path.dirname(myportdir)
12907                 if not os.path.exists(myportdir+"/CVS"):
12908                         #initial checkout
12909                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12910                         if os.path.exists(cvsdir+"/gentoo-x86"):
12911                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12912                                 sys.exit(1)
12913                         try:
12914                                 os.rmdir(myportdir)
12915                         except OSError, e:
12916                                 if e.errno != errno.ENOENT:
12917                                         sys.stderr.write(
12918                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12919                                         sys.exit(1)
12920                                 del e
12921                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12922                                 print "!!! cvs checkout error; exiting."
12923                                 sys.exit(1)
12924                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12925                 else:
12926                         #cvs update
12927                         print ">>> Starting cvs update with "+syncuri+"..."
12928                         retval = portage.process.spawn_bash(
12929                                 "cd %s; cvs -z0 -q update -dP" % \
12930                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12931                         if retval != os.EX_OK:
12932                                 sys.exit(retval)
12933                 dosyncuri = syncuri
12934         else:
12935                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12936                         noiselevel=-1, level=logging.ERROR)
12937                 return 1
12938
12939         if updatecache_flg and  \
12940                 myaction != "metadata" and \
12941                 "metadata-transfer" not in settings.features:
12942                 updatecache_flg = False
12943
12944         # Reload the whole config from scratch.
12945         settings, trees, mtimedb = load_emerge_config(trees=trees)
12946         root_config = trees[settings["ROOT"]]["root_config"]
12947         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12948
12949         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12950                 action_metadata(settings, portdb, myopts)
12951
12952         if portage._global_updates(trees, mtimedb["updates"]):
12953                 mtimedb.commit()
12954                 # Reload the whole config from scratch.
12955                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12956                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12957                 root_config = trees[settings["ROOT"]]["root_config"]
12958
12959         mybestpv = portdb.xmatch("bestmatch-visible",
12960                 portage.const.PORTAGE_PACKAGE_ATOM)
12961         mypvs = portage.best(
12962                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12963                 portage.const.PORTAGE_PACKAGE_ATOM))
12964
12965         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12966
12967         if myaction != "metadata":
12968                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12969                         retval = portage.process.spawn(
12970                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12971                                 dosyncuri], env=settings.environ())
12972                         if retval != os.EX_OK:
12973                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12974
12975         if(mybestpv != mypvs) and not "--quiet" in myopts:
12976                 print
12977                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12978                 print red(" * ")+"that you update portage now, before any other packages are updated."
12979                 print
12980                 print red(" * ")+"To update portage, run 'emerge portage' now."
12981                 print
12982         
12983         display_news_notification(root_config, myopts)
12984         return os.EX_OK
12985
12986 def git_sync_timestamps(settings, portdir):
12987         """
12988         Since git doesn't preserve timestamps, synchronize timestamps between
12989         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12990         for a given file as long as the file in the working tree is not modified
12991         (relative to HEAD).
12992         """
12993         cache_dir = os.path.join(portdir, "metadata", "cache")
12994         if not os.path.isdir(cache_dir):
12995                 return os.EX_OK
12996         writemsg_level(">>> Synchronizing timestamps...\n")
12997
12998         from portage.cache.cache_errors import CacheError
12999         try:
13000                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13001                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13002         except CacheError, e:
13003                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13004                         level=logging.ERROR, noiselevel=-1)
13005                 return 1
13006
13007         ec_dir = os.path.join(portdir, "eclass")
13008         try:
13009                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13010                         if f.endswith(".eclass"))
13011         except OSError, e:
13012                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13013                         level=logging.ERROR, noiselevel=-1)
13014                 return 1
13015
13016         args = [portage.const.BASH_BINARY, "-c",
13017                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13018                 portage._shell_quote(portdir)]
13019         import subprocess
13020         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13021         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13022         rval = proc.wait()
13023         if rval != os.EX_OK:
13024                 return rval
13025
13026         modified_eclasses = set(ec for ec in ec_names \
13027                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13028
13029         updated_ec_mtimes = {}
13030
13031         for cpv in cache_db:
13032                 cpv_split = portage.catpkgsplit(cpv)
13033                 if cpv_split is None:
13034                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13035                                 level=logging.ERROR, noiselevel=-1)
13036                         continue
13037
13038                 cat, pn, ver, rev = cpv_split
13039                 cat, pf = portage.catsplit(cpv)
13040                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13041                 if relative_eb_path in modified_files:
13042                         continue
13043
13044                 try:
13045                         cache_entry = cache_db[cpv]
13046                         eb_mtime = cache_entry.get("_mtime_")
13047                         ec_mtimes = cache_entry.get("_eclasses_")
13048                 except KeyError:
13049                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13050                                 level=logging.ERROR, noiselevel=-1)
13051                         continue
13052                 except CacheError, e:
13053                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13054                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13055                         continue
13056
13057                 if eb_mtime is None:
13058                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13059                                 level=logging.ERROR, noiselevel=-1)
13060                         continue
13061
13062                 try:
13063                         eb_mtime = long(eb_mtime)
13064                 except ValueError:
13065                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13066                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13067                         continue
13068
13069                 if ec_mtimes is None:
13070                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13071                                 level=logging.ERROR, noiselevel=-1)
13072                         continue
13073
13074                 if modified_eclasses.intersection(ec_mtimes):
13075                         continue
13076
13077                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13078                 if missing_eclasses:
13079                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13080                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13081                                 noiselevel=-1)
13082                         continue
13083
13084                 eb_path = os.path.join(portdir, relative_eb_path)
13085                 try:
13086                         current_eb_mtime = os.stat(eb_path)
13087                 except OSError:
13088                         writemsg_level("!!! Missing ebuild: %s\n" % \
13089                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13090                         continue
13091
13092                 inconsistent = False
13093                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13094                         updated_mtime = updated_ec_mtimes.get(ec)
13095                         if updated_mtime is not None and updated_mtime != ec_mtime:
13096                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13097                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13098                                 inconsistent = True
13099                                 break
13100
13101                 if inconsistent:
13102                         continue
13103
13104                 if current_eb_mtime != eb_mtime:
13105                         os.utime(eb_path, (eb_mtime, eb_mtime))
13106
13107                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13108                         if ec in updated_ec_mtimes:
13109                                 continue
13110                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13111                         current_mtime = long(os.stat(ec_path).st_mtime)
13112                         if current_mtime != ec_mtime:
13113                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13114                         updated_ec_mtimes[ec] = ec_mtime
13115
13116         return os.EX_OK
13117
13118 def action_metadata(settings, portdb, myopts):
13119         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13120         old_umask = os.umask(0002)
13121         cachedir = os.path.normpath(settings.depcachedir)
13122         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13123                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13124                                         "/sys", "/tmp", "/usr",  "/var"]:
13125                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13126                         "ROOT DIRECTORY ON YOUR SYSTEM."
13127                 print >> sys.stderr, \
13128                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13129                 sys.exit(73)
13130         if not os.path.exists(cachedir):
13131                 os.mkdir(cachedir)
13132
13133         ec = portage.eclass_cache.cache(portdb.porttree_root)
13134         myportdir = os.path.realpath(settings["PORTDIR"])
13135         cm = settings.load_best_module("portdbapi.metadbmodule")(
13136                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13137
13138         from portage.cache import util
13139
13140         class percentage_noise_maker(util.quiet_mirroring):
13141                 def __init__(self, dbapi):
13142                         self.dbapi = dbapi
13143                         self.cp_all = dbapi.cp_all()
13144                         l = len(self.cp_all)
13145                         self.call_update_min = 100000000
13146                         self.min_cp_all = l/100.0
13147                         self.count = 1
13148                         self.pstr = ''
13149
13150                 def __iter__(self):
13151                         for x in self.cp_all:
13152                                 self.count += 1
13153                                 if self.count > self.min_cp_all:
13154                                         self.call_update_min = 0
13155                                         self.count = 0
13156                                 for y in self.dbapi.cp_list(x):
13157                                         yield y
13158                         self.call_update_mine = 0
13159
13160                 def update(self, *arg):
13161                         try:                            self.pstr = int(self.pstr) + 1
13162                         except ValueError:      self.pstr = 1
13163                         sys.stdout.write("%s%i%%" % \
13164                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13165                         sys.stdout.flush()
13166                         self.call_update_min = 10000000
13167
13168                 def finish(self, *arg):
13169                         sys.stdout.write("\b\b\b\b100%\n")
13170                         sys.stdout.flush()
13171
13172         if "--quiet" in myopts:
13173                 def quicky_cpv_generator(cp_all_list):
13174                         for x in cp_all_list:
13175                                 for y in portdb.cp_list(x):
13176                                         yield y
13177                 source = quicky_cpv_generator(portdb.cp_all())
13178                 noise_maker = portage.cache.util.quiet_mirroring()
13179         else:
13180                 noise_maker = source = percentage_noise_maker(portdb)
13181         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13182                 eclass_cache=ec, verbose_instance=noise_maker)
13183
13184         sys.stdout.flush()
13185         os.umask(old_umask)
13186
13187 def action_regen(settings, portdb, max_jobs, max_load):
13188         xterm_titles = "notitles" not in settings.features
13189         emergelog(xterm_titles, " === regen")
13190         #regenerate cache entries
13191         portage.writemsg_stdout("Regenerating cache entries...\n")
13192         try:
13193                 os.close(sys.stdin.fileno())
13194         except SystemExit, e:
13195                 raise # Needed else can't exit
13196         except:
13197                 pass
13198         sys.stdout.flush()
13199
13200         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13201         regen.run()
13202
13203         portage.writemsg_stdout("done!\n")
13204         return regen.returncode
13205
13206 def action_config(settings, trees, myopts, myfiles):
13207         if len(myfiles) != 1:
13208                 print red("!!! config can only take a single package atom at this time\n")
13209                 sys.exit(1)
13210         if not is_valid_package_atom(myfiles[0]):
13211                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13212                         noiselevel=-1)
13213                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13214                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13215                 sys.exit(1)
13216         print
13217         try:
13218                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13219         except portage.exception.AmbiguousPackageName, e:
13220                 # Multiple matches thrown from cpv_expand
13221                 pkgs = e.args[0]
13222         if len(pkgs) == 0:
13223                 print "No packages found.\n"
13224                 sys.exit(0)
13225         elif len(pkgs) > 1:
13226                 if "--ask" in myopts:
13227                         options = []
13228                         print "Please select a package to configure:"
13229                         idx = 0
13230                         for pkg in pkgs:
13231                                 idx += 1
13232                                 options.append(str(idx))
13233                                 print options[-1]+") "+pkg
13234                         print "X) Cancel"
13235                         options.append("X")
13236                         idx = userquery("Selection?", options)
13237                         if idx == "X":
13238                                 sys.exit(0)
13239                         pkg = pkgs[int(idx)-1]
13240                 else:
13241                         print "The following packages available:"
13242                         for pkg in pkgs:
13243                                 print "* "+pkg
13244                         print "\nPlease use a specific atom or the --ask option."
13245                         sys.exit(1)
13246         else:
13247                 pkg = pkgs[0]
13248
13249         print
13250         if "--ask" in myopts:
13251                 if userquery("Ready to configure "+pkg+"?") == "No":
13252                         sys.exit(0)
13253         else:
13254                 print "Configuring pkg..."
13255         print
13256         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13257         mysettings = portage.config(clone=settings)
13258         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13259         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13260         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13261                 mysettings,
13262                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13263                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13264         if retval == os.EX_OK:
13265                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13266                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13267         print
13268
13269 def action_info(settings, trees, myopts, myfiles):
13270         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13271                 settings.profile_path, settings["CHOST"],
13272                 trees[settings["ROOT"]]["vartree"].dbapi)
13273         header_width = 65
13274         header_title = "System Settings"
13275         if myfiles:
13276                 print header_width * "="
13277                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13278         print header_width * "="
13279         print "System uname: "+platform.platform(aliased=1)
13280
13281         lastSync = portage.grabfile(os.path.join(
13282                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13283         print "Timestamp of tree:",
13284         if lastSync:
13285                 print lastSync[0]
13286         else:
13287                 print "Unknown"
13288
13289         output=commands.getstatusoutput("distcc --version")
13290         if not output[0]:
13291                 print str(output[1].split("\n",1)[0]),
13292                 if "distcc" in settings.features:
13293                         print "[enabled]"
13294                 else:
13295                         print "[disabled]"
13296
13297         output=commands.getstatusoutput("ccache -V")
13298         if not output[0]:
13299                 print str(output[1].split("\n",1)[0]),
13300                 if "ccache" in settings.features:
13301                         print "[enabled]"
13302                 else:
13303                         print "[disabled]"
13304
13305         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13306                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13307         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13308         myvars  = portage.util.unique_array(myvars)
13309         myvars.sort()
13310
13311         for x in myvars:
13312                 if portage.isvalidatom(x):
13313                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13314                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13315                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13316                         pkgs = []
13317                         for pn, ver, rev in pkg_matches:
13318                                 if rev != "r0":
13319                                         pkgs.append(ver + "-" + rev)
13320                                 else:
13321                                         pkgs.append(ver)
13322                         if pkgs:
13323                                 pkgs = ", ".join(pkgs)
13324                                 print "%-20s %s" % (x+":", pkgs)
13325                 else:
13326                         print "%-20s %s" % (x+":", "[NOT VALID]")
13327
13328         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13329
13330         if "--verbose" in myopts:
13331                 myvars=settings.keys()
13332         else:
13333                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13334                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13335                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13336                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13337
13338                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13339
13340         myvars = portage.util.unique_array(myvars)
13341         unset_vars = []
13342         myvars.sort()
13343         for x in myvars:
13344                 if x in settings:
13345                         if x != "USE":
13346                                 print '%s="%s"' % (x, settings[x])
13347                         else:
13348                                 use = set(settings["USE"].split())
13349                                 use_expand = settings["USE_EXPAND"].split()
13350                                 use_expand.sort()
13351                                 for varname in use_expand:
13352                                         flag_prefix = varname.lower() + "_"
13353                                         for f in list(use):
13354                                                 if f.startswith(flag_prefix):
13355                                                         use.remove(f)
13356                                 use = list(use)
13357                                 use.sort()
13358                                 print 'USE="%s"' % " ".join(use),
13359                                 for varname in use_expand:
13360                                         myval = settings.get(varname)
13361                                         if myval:
13362                                                 print '%s="%s"' % (varname, myval),
13363                                 print
13364                 else:
13365                         unset_vars.append(x)
13366         if unset_vars:
13367                 print "Unset:  "+", ".join(unset_vars)
13368         print
13369
13370         if "--debug" in myopts:
13371                 for x in dir(portage):
13372                         module = getattr(portage, x)
13373                         if "cvs_id_string" in dir(module):
13374                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13375
13376         # See if we can find any packages installed matching the strings
13377         # passed on the command line
13378         mypkgs = []
13379         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13380         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13381         for x in myfiles:
13382                 mypkgs.extend(vardb.match(x))
13383
13384         # If some packages were found...
13385         if mypkgs:
13386                 # Get our global settings (we only print stuff if it varies from
13387                 # the current config)
13388                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13389                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13390                 global_vals = {}
13391                 pkgsettings = portage.config(clone=settings)
13392
13393                 for myvar in mydesiredvars:
13394                         global_vals[myvar] = set(settings.get(myvar, "").split())
13395
13396                 # Loop through each package
13397                 # Only print settings if they differ from global settings
13398                 header_title = "Package Settings"
13399                 print header_width * "="
13400                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13401                 print header_width * "="
13402                 from portage.output import EOutput
13403                 out = EOutput()
13404                 for pkg in mypkgs:
13405                         # Get all package specific variables
13406                         auxvalues = vardb.aux_get(pkg, auxkeys)
13407                         valuesmap = {}
13408                         for i in xrange(len(auxkeys)):
13409                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13410                         diff_values = {}
13411                         for myvar in mydesiredvars:
13412                                 # If the package variable doesn't match the
13413                                 # current global variable, something has changed
13414                                 # so set diff_found so we know to print
13415                                 if valuesmap[myvar] != global_vals[myvar]:
13416                                         diff_values[myvar] = valuesmap[myvar]
13417                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13418                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13419                         pkgsettings.reset()
13420                         # If a matching ebuild is no longer available in the tree, maybe it
13421                         # would make sense to compare against the flags for the best
13422                         # available version with the same slot?
13423                         mydb = None
13424                         if portdb.cpv_exists(pkg):
13425                                 mydb = portdb
13426                         pkgsettings.setcpv(pkg, mydb=mydb)
13427                         if valuesmap["IUSE"].intersection(
13428                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13429                                 diff_values["USE"] = valuesmap["USE"]
13430                         # If a difference was found, print the info for
13431                         # this package.
13432                         if diff_values:
13433                                 # Print package info
13434                                 print "%s was built with the following:" % pkg
13435                                 for myvar in mydesiredvars + ["USE"]:
13436                                         if myvar in diff_values:
13437                                                 mylist = list(diff_values[myvar])
13438                                                 mylist.sort()
13439                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13440                                 print
13441                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13442                         ebuildpath = vardb.findname(pkg)
13443                         if not ebuildpath or not os.path.exists(ebuildpath):
13444                                 out.ewarn("No ebuild found for '%s'" % pkg)
13445                                 continue
13446                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13447                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13448                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13449                                 tree="vartree")
13450
13451 def action_search(root_config, myopts, myfiles, spinner):
13452         if not myfiles:
13453                 print "emerge: no search terms provided."
13454         else:
13455                 searchinstance = search(root_config,
13456                         spinner, "--searchdesc" in myopts,
13457                         "--quiet" not in myopts, "--usepkg" in myopts,
13458                         "--usepkgonly" in myopts)
13459                 for mysearch in myfiles:
13460                         try:
13461                                 searchinstance.execute(mysearch)
13462                         except re.error, comment:
13463                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13464                                 sys.exit(1)
13465                         searchinstance.output()
13466
13467 def action_depclean(settings, trees, ldpath_mtimes,
13468         myopts, action, myfiles, spinner):
13469         # Kill packages that aren't explicitly merged or are required as a
13470         # dependency of another package. World file is explicit.
13471
13472         # Global depclean or prune operations are not very safe when there are
13473         # missing dependencies since it's unknown how badly incomplete
13474         # the dependency graph is, and we might accidentally remove packages
13475         # that should have been pulled into the graph. On the other hand, it's
13476         # relatively safe to ignore missing deps when only asked to remove
13477         # specific packages.
13478         allow_missing_deps = len(myfiles) > 0
13479
13480         msg = []
13481         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13482         msg.append("mistakes. Packages that are part of the world set will always\n")
13483         msg.append("be kept.  They can be manually added to this set with\n")
13484         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13485         msg.append("package.provided (see portage(5)) will be removed by\n")
13486         msg.append("depclean, even if they are part of the world set.\n")
13487         msg.append("\n")
13488         msg.append("As a safety measure, depclean will not remove any packages\n")
13489         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13490         msg.append("consequence, it is often necessary to run %s\n" % \
13491                 good("`emerge --update"))
13492         msg.append(good("--newuse --deep @system @world`") + \
13493                 " prior to depclean.\n")
13494
13495         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13496                 portage.writemsg_stdout("\n")
13497                 for x in msg:
13498                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13499
13500         xterm_titles = "notitles" not in settings.features
13501         myroot = settings["ROOT"]
13502         root_config = trees[myroot]["root_config"]
13503         getSetAtoms = root_config.setconfig.getSetAtoms
13504         vardb = trees[myroot]["vartree"].dbapi
13505
13506         required_set_names = ("system", "world")
13507         required_sets = {}
13508         set_args = []
13509
13510         for s in required_set_names:
13511                 required_sets[s] = InternalPackageSet(
13512                         initial_atoms=getSetAtoms(s))
13513
13514         
13515         # When removing packages, use a temporary version of world
13516         # which excludes packages that are intended to be eligible for
13517         # removal.
13518         world_temp_set = required_sets["world"]
13519         system_set = required_sets["system"]
13520
13521         if not system_set or not world_temp_set:
13522
13523                 if not system_set:
13524                         writemsg_level("!!! You have no system list.\n",
13525                                 level=logging.ERROR, noiselevel=-1)
13526
13527                 if not world_temp_set:
13528                         writemsg_level("!!! You have no world file.\n",
13529                                         level=logging.WARNING, noiselevel=-1)
13530
13531                 writemsg_level("!!! Proceeding is likely to " + \
13532                         "break your installation.\n",
13533                         level=logging.WARNING, noiselevel=-1)
13534                 if "--pretend" not in myopts:
13535                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13536
13537         if action == "depclean":
13538                 emergelog(xterm_titles, " >>> depclean")
13539
13540         import textwrap
13541         args_set = InternalPackageSet()
13542         if myfiles:
13543                 for x in myfiles:
13544                         if not is_valid_package_atom(x):
13545                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13546                                         level=logging.ERROR, noiselevel=-1)
13547                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13548                                 return
13549                         try:
13550                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13551                         except portage.exception.AmbiguousPackageName, e:
13552                                 msg = "The short ebuild name \"" + x + \
13553                                         "\" is ambiguous.  Please specify " + \
13554                                         "one of the following " + \
13555                                         "fully-qualified ebuild names instead:"
13556                                 for line in textwrap.wrap(msg, 70):
13557                                         writemsg_level("!!! %s\n" % (line,),
13558                                                 level=logging.ERROR, noiselevel=-1)
13559                                 for i in e[0]:
13560                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13561                                                 level=logging.ERROR, noiselevel=-1)
13562                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13563                                 return
13564                         args_set.add(atom)
13565                 matched_packages = False
13566                 for x in args_set:
13567                         if vardb.match(x):
13568                                 matched_packages = True
13569                                 break
13570                 if not matched_packages:
13571                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13572                                 action)
13573                         return
13574
13575         writemsg_level("\nCalculating dependencies  ")
13576         resolver_params = create_depgraph_params(myopts, "remove")
13577         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13578         vardb = resolver.trees[myroot]["vartree"].dbapi
13579
13580         if action == "depclean":
13581
13582                 if args_set:
13583                         # Pull in everything that's installed but not matched
13584                         # by an argument atom since we don't want to clean any
13585                         # package if something depends on it.
13586
13587                         world_temp_set.clear()
13588                         for pkg in vardb:
13589                                 spinner.update()
13590
13591                                 try:
13592                                         if args_set.findAtomForPackage(pkg) is None:
13593                                                 world_temp_set.add("=" + pkg.cpv)
13594                                                 continue
13595                                 except portage.exception.InvalidDependString, e:
13596                                         show_invalid_depstring_notice(pkg,
13597                                                 pkg.metadata["PROVIDE"], str(e))
13598                                         del e
13599                                         world_temp_set.add("=" + pkg.cpv)
13600                                         continue
13601
13602         elif action == "prune":
13603
13604                 # Pull in everything that's installed since we don't
13605                 # to prune a package if something depends on it.
13606                 world_temp_set.clear()
13607                 world_temp_set.update(vardb.cp_all())
13608
13609                 if not args_set:
13610
13611                         # Try to prune everything that's slotted.
13612                         for cp in vardb.cp_all():
13613                                 if len(vardb.cp_list(cp)) > 1:
13614                                         args_set.add(cp)
13615
13616                 # Remove atoms from world that match installed packages
13617                 # that are also matched by argument atoms, but do not remove
13618                 # them if they match the highest installed version.
13619                 for pkg in vardb:
13620                         spinner.update()
13621                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13622                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13623                                 raise AssertionError("package expected in matches: " + \
13624                                         "cp = %s, cpv = %s matches = %s" % \
13625                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13626
13627                         highest_version = pkgs_for_cp[-1]
13628                         if pkg == highest_version:
13629                                 # pkg is the highest version
13630                                 world_temp_set.add("=" + pkg.cpv)
13631                                 continue
13632
13633                         if len(pkgs_for_cp) <= 1:
13634                                 raise AssertionError("more packages expected: " + \
13635                                         "cp = %s, cpv = %s matches = %s" % \
13636                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13637
13638                         try:
13639                                 if args_set.findAtomForPackage(pkg) is None:
13640                                         world_temp_set.add("=" + pkg.cpv)
13641                                         continue
13642                         except portage.exception.InvalidDependString, e:
13643                                 show_invalid_depstring_notice(pkg,
13644                                         pkg.metadata["PROVIDE"], str(e))
13645                                 del e
13646                                 world_temp_set.add("=" + pkg.cpv)
13647                                 continue
13648
13649         set_args = {}
13650         for s, package_set in required_sets.iteritems():
13651                 set_atom = SETPREFIX + s
13652                 set_arg = SetArg(arg=set_atom, set=package_set,
13653                         root_config=resolver.roots[myroot])
13654                 set_args[s] = set_arg
13655                 for atom in set_arg.set:
13656                         resolver._dep_stack.append(
13657                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13658                         resolver.digraph.add(set_arg, None)
13659
13660         success = resolver._complete_graph()
13661         writemsg_level("\b\b... done!\n")
13662
13663         resolver.display_problems()
13664
13665         if not success:
13666                 return 1
13667
13668         def unresolved_deps():
13669
13670                 unresolvable = set()
13671                 for dep in resolver._initially_unsatisfied_deps:
13672                         if isinstance(dep.parent, Package) and \
13673                                 (dep.priority > UnmergeDepPriority.SOFT):
13674                                 unresolvable.add((dep.atom, dep.parent.cpv))
13675
13676                 if not unresolvable:
13677                         return False
13678
13679                 if unresolvable and not allow_missing_deps:
13680                         prefix = bad(" * ")
13681                         msg = []
13682                         msg.append("Dependencies could not be completely resolved due to")
13683                         msg.append("the following required packages not being installed:")
13684                         msg.append("")
13685                         for atom, parent in unresolvable:
13686                                 msg.append("  %s pulled in by:" % (atom,))
13687                                 msg.append("    %s" % (parent,))
13688                                 msg.append("")
13689                         msg.append("Have you forgotten to run " + \
13690                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13691                         msg.append(("to %s? It may be necessary to manually " + \
13692                                 "uninstall packages that no longer") % action)
13693                         msg.append("exist in the portage tree since " + \
13694                                 "it may not be possible to satisfy their")
13695                         msg.append("dependencies.  Also, be aware of " + \
13696                                 "the --with-bdeps option that is documented")
13697                         msg.append("in " + good("`man emerge`") + ".")
13698                         if action == "prune":
13699                                 msg.append("")
13700                                 msg.append("If you would like to ignore " + \
13701                                         "dependencies then use %s." % good("--nodeps"))
13702                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13703                                 level=logging.ERROR, noiselevel=-1)
13704                         return True
13705                 return False
13706
13707         if unresolved_deps():
13708                 return 1
13709
13710         graph = resolver.digraph.copy()
13711         required_pkgs_total = 0
13712         for node in graph:
13713                 if isinstance(node, Package):
13714                         required_pkgs_total += 1
13715
13716         def show_parents(child_node):
13717                 parent_nodes = graph.parent_nodes(child_node)
13718                 if not parent_nodes:
13719                         # With --prune, the highest version can be pulled in without any
13720                         # real parent since all installed packages are pulled in.  In that
13721                         # case there's nothing to show here.
13722                         return
13723                 parent_strs = []
13724                 for node in parent_nodes:
13725                         parent_strs.append(str(getattr(node, "cpv", node)))
13726                 parent_strs.sort()
13727                 msg = []
13728                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13729                 for parent_str in parent_strs:
13730                         msg.append("    %s\n" % (parent_str,))
13731                 msg.append("\n")
13732                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13733
13734         def cmp_pkg_cpv(pkg1, pkg2):
13735                 """Sort Package instances by cpv."""
13736                 if pkg1.cpv > pkg2.cpv:
13737                         return 1
13738                 elif pkg1.cpv == pkg2.cpv:
13739                         return 0
13740                 else:
13741                         return -1
13742
13743         def create_cleanlist():
13744                 pkgs_to_remove = []
13745
13746                 if action == "depclean":
13747                         if args_set:
13748
13749                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13750                                         arg_atom = None
13751                                         try:
13752                                                 arg_atom = args_set.findAtomForPackage(pkg)
13753                                         except portage.exception.InvalidDependString:
13754                                                 # this error has already been displayed by now
13755                                                 continue
13756
13757                                         if arg_atom:
13758                                                 if pkg not in graph:
13759                                                         pkgs_to_remove.append(pkg)
13760                                                 elif "--verbose" in myopts:
13761                                                         show_parents(pkg)
13762
13763                         else:
13764                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13765                                         if pkg not in graph:
13766                                                 pkgs_to_remove.append(pkg)
13767                                         elif "--verbose" in myopts:
13768                                                 show_parents(pkg)
13769
13770                 elif action == "prune":
13771                         # Prune really uses all installed instead of world. It's not
13772                         # a real reverse dependency so don't display it as such.
13773                         graph.remove(set_args["world"])
13774
13775                         for atom in args_set:
13776                                 for pkg in vardb.match_pkgs(atom):
13777                                         if pkg not in graph:
13778                                                 pkgs_to_remove.append(pkg)
13779                                         elif "--verbose" in myopts:
13780                                                 show_parents(pkg)
13781
13782                 if not pkgs_to_remove:
13783                         writemsg_level(
13784                                 ">>> No packages selected for removal by %s\n" % action)
13785                         if "--verbose" not in myopts:
13786                                 writemsg_level(
13787                                         ">>> To see reverse dependencies, use %s\n" % \
13788                                                 good("--verbose"))
13789                         if action == "prune":
13790                                 writemsg_level(
13791                                         ">>> To ignore dependencies, use %s\n" % \
13792                                                 good("--nodeps"))
13793
13794                 return pkgs_to_remove
13795
13796         cleanlist = create_cleanlist()
13797
13798         if len(cleanlist):
13799                 clean_set = set(cleanlist)
13800
13801                 # Check if any of these package are the sole providers of libraries
13802                 # with consumers that have not been selected for removal. If so, these
13803                 # packages and any dependencies need to be added to the graph.
13804                 real_vardb = trees[myroot]["vartree"].dbapi
13805                 linkmap = real_vardb.linkmap
13806                 liblist = linkmap.listLibraryObjects()
13807                 consumer_cache = {}
13808                 provider_cache = {}
13809                 soname_cache = {}
13810                 consumer_map = {}
13811
13812                 writemsg_level(">>> Checking for lib consumers...\n")
13813
13814                 for pkg in cleanlist:
13815                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13816                         provided_libs = set()
13817
13818                         for lib in liblist:
13819                                 if pkg_dblink.isowner(lib, myroot):
13820                                         provided_libs.add(lib)
13821
13822                         if not provided_libs:
13823                                 continue
13824
13825                         consumers = {}
13826                         for lib in provided_libs:
13827                                 lib_consumers = consumer_cache.get(lib)
13828                                 if lib_consumers is None:
13829                                         lib_consumers = linkmap.findConsumers(lib)
13830                                         consumer_cache[lib] = lib_consumers
13831                                 if lib_consumers:
13832                                         consumers[lib] = lib_consumers
13833
13834                         if not consumers:
13835                                 continue
13836
13837                         for lib, lib_consumers in consumers.items():
13838                                 for consumer_file in list(lib_consumers):
13839                                         if pkg_dblink.isowner(consumer_file, myroot):
13840                                                 lib_consumers.remove(consumer_file)
13841                                 if not lib_consumers:
13842                                         del consumers[lib]
13843
13844                         if not consumers:
13845                                 continue
13846
13847                         for lib, lib_consumers in consumers.iteritems():
13848
13849                                 soname = soname_cache.get(lib)
13850                                 if soname is None:
13851                                         soname = linkmap.getSoname(lib)
13852                                         soname_cache[lib] = soname
13853
13854                                 consumer_providers = []
13855                                 for lib_consumer in lib_consumers:
13856                                         providers = provider_cache.get(lib)
13857                                         if providers is None:
13858                                                 providers = linkmap.findProviders(lib_consumer)
13859                                                 provider_cache[lib_consumer] = providers
13860                                         if soname not in providers:
13861                                                 # Why does this happen?
13862                                                 continue
13863                                         consumer_providers.append(
13864                                                 (lib_consumer, providers[soname]))
13865
13866                                 consumers[lib] = consumer_providers
13867
13868                         consumer_map[pkg] = consumers
13869
13870                 if consumer_map:
13871
13872                         search_files = set()
13873                         for consumers in consumer_map.itervalues():
13874                                 for lib, consumer_providers in consumers.iteritems():
13875                                         for lib_consumer, providers in consumer_providers:
13876                                                 search_files.add(lib_consumer)
13877                                                 search_files.update(providers)
13878
13879                         writemsg_level(">>> Assigning files to packages...\n")
13880                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13881
13882                         for pkg, consumers in consumer_map.items():
13883                                 for lib, consumer_providers in consumers.items():
13884                                         lib_consumers = set()
13885
13886                                         for lib_consumer, providers in consumer_providers:
13887                                                 owner_set = file_owners.get(lib_consumer)
13888                                                 provider_dblinks = set()
13889                                                 provider_pkgs = set()
13890
13891                                                 if len(providers) > 1:
13892                                                         for provider in providers:
13893                                                                 provider_set = file_owners.get(provider)
13894                                                                 if provider_set is not None:
13895                                                                         provider_dblinks.update(provider_set)
13896
13897                                                 if len(provider_dblinks) > 1:
13898                                                         for provider_dblink in provider_dblinks:
13899                                                                 pkg_key = ("installed", myroot,
13900                                                                         provider_dblink.mycpv, "nomerge")
13901                                                                 if pkg_key not in clean_set:
13902                                                                         provider_pkgs.add(vardb.get(pkg_key))
13903
13904                                                 if provider_pkgs:
13905                                                         continue
13906
13907                                                 if owner_set is not None:
13908                                                         lib_consumers.update(owner_set)
13909
13910                                         for consumer_dblink in list(lib_consumers):
13911                                                 if ("installed", myroot, consumer_dblink.mycpv,
13912                                                         "nomerge") in clean_set:
13913                                                         lib_consumers.remove(consumer_dblink)
13914                                                         continue
13915
13916                                         if lib_consumers:
13917                                                 consumers[lib] = lib_consumers
13918                                         else:
13919                                                 del consumers[lib]
13920                                 if not consumers:
13921                                         del consumer_map[pkg]
13922
13923                 if consumer_map:
13924                         # TODO: Implement a package set for rebuilding consumer packages.
13925
13926                         msg = "In order to avoid breakage of link level " + \
13927                                 "dependencies, one or more packages will not be removed. " + \
13928                                 "This can be solved by rebuilding " + \
13929                                 "the packages that pulled them in."
13930
13931                         prefix = bad(" * ")
13932                         from textwrap import wrap
13933                         writemsg_level("".join(prefix + "%s\n" % line for \
13934                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13935
13936                         msg = []
13937                         for pkg, consumers in consumer_map.iteritems():
13938                                 unique_consumers = set(chain(*consumers.values()))
13939                                 unique_consumers = sorted(consumer.mycpv \
13940                                         for consumer in unique_consumers)
13941                                 msg.append("")
13942                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13943                                 for consumer in unique_consumers:
13944                                         msg.append("    %s" % (consumer,))
13945                         msg.append("")
13946                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13947                                 level=logging.WARNING, noiselevel=-1)
13948
13949                         # Add lib providers to the graph as children of lib consumers,
13950                         # and also add any dependencies pulled in by the provider.
13951                         writemsg_level(">>> Adding lib providers to graph...\n")
13952
13953                         for pkg, consumers in consumer_map.iteritems():
13954                                 for consumer_dblink in set(chain(*consumers.values())):
13955                                         consumer_pkg = vardb.get(("installed", myroot,
13956                                                 consumer_dblink.mycpv, "nomerge"))
13957                                         if not resolver._add_pkg(pkg,
13958                                                 Dependency(parent=consumer_pkg,
13959                                                 priority=UnmergeDepPriority(runtime=True),
13960                                                 root=pkg.root)):
13961                                                 resolver.display_problems()
13962                                                 return 1
13963
13964                         writemsg_level("\nCalculating dependencies  ")
13965                         success = resolver._complete_graph()
13966                         writemsg_level("\b\b... done!\n")
13967                         resolver.display_problems()
13968                         if not success:
13969                                 return 1
13970                         if unresolved_deps():
13971                                 return 1
13972
13973                         graph = resolver.digraph.copy()
13974                         required_pkgs_total = 0
13975                         for node in graph:
13976                                 if isinstance(node, Package):
13977                                         required_pkgs_total += 1
13978                         cleanlist = create_cleanlist()
13979                         if not cleanlist:
13980                                 return 0
13981                         clean_set = set(cleanlist)
13982
13983                 # Use a topological sort to create an unmerge order such that
13984                 # each package is unmerged before it's dependencies. This is
13985                 # necessary to avoid breaking things that may need to run
13986                 # during pkg_prerm or pkg_postrm phases.
13987
13988                 # Create a new graph to account for dependencies between the
13989                 # packages being unmerged.
13990                 graph = digraph()
13991                 del cleanlist[:]
13992
13993                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13994                 runtime = UnmergeDepPriority(runtime=True)
13995                 runtime_post = UnmergeDepPriority(runtime_post=True)
13996                 buildtime = UnmergeDepPriority(buildtime=True)
13997                 priority_map = {
13998                         "RDEPEND": runtime,
13999                         "PDEPEND": runtime_post,
14000                         "DEPEND": buildtime,
14001                 }
14002
14003                 for node in clean_set:
14004                         graph.add(node, None)
14005                         mydeps = []
14006                         node_use = node.metadata["USE"].split()
14007                         for dep_type in dep_keys:
14008                                 depstr = node.metadata[dep_type]
14009                                 if not depstr:
14010                                         continue
14011                                 try:
14012                                         portage.dep._dep_check_strict = False
14013                                         success, atoms = portage.dep_check(depstr, None, settings,
14014                                                 myuse=node_use, trees=resolver._graph_trees,
14015                                                 myroot=myroot)
14016                                 finally:
14017                                         portage.dep._dep_check_strict = True
14018                                 if not success:
14019                                         # Ignore invalid deps of packages that will
14020                                         # be uninstalled anyway.
14021                                         continue
14022
14023                                 priority = priority_map[dep_type]
14024                                 for atom in atoms:
14025                                         if not isinstance(atom, portage.dep.Atom):
14026                                                 # Ignore invalid atoms returned from dep_check().
14027                                                 continue
14028                                         if atom.blocker:
14029                                                 continue
14030                                         matches = vardb.match_pkgs(atom)
14031                                         if not matches:
14032                                                 continue
14033                                         for child_node in matches:
14034                                                 if child_node in clean_set:
14035                                                         graph.add(child_node, node, priority=priority)
14036
14037                 ordered = True
14038                 if len(graph.order) == len(graph.root_nodes()):
14039                         # If there are no dependencies between packages
14040                         # let unmerge() group them by cat/pn.
14041                         ordered = False
14042                         cleanlist = [pkg.cpv for pkg in graph.order]
14043                 else:
14044                         # Order nodes from lowest to highest overall reference count for
14045                         # optimal root node selection.
14046                         node_refcounts = {}
14047                         for node in graph.order:
14048                                 node_refcounts[node] = len(graph.parent_nodes(node))
14049                         def cmp_reference_count(node1, node2):
14050                                 return node_refcounts[node1] - node_refcounts[node2]
14051                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14052         
14053                         ignore_priority_range = [None]
14054                         ignore_priority_range.extend(
14055                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14056                         while not graph.empty():
14057                                 for ignore_priority in ignore_priority_range:
14058                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14059                                         if nodes:
14060                                                 break
14061                                 if not nodes:
14062                                         raise AssertionError("no root nodes")
14063                                 if ignore_priority is not None:
14064                                         # Some deps have been dropped due to circular dependencies,
14065                                         # so only pop one node in order do minimize the number that
14066                                         # are dropped.
14067                                         del nodes[1:]
14068                                 for node in nodes:
14069                                         graph.remove(node)
14070                                         cleanlist.append(node.cpv)
14071
14072                 unmerge(root_config, myopts, "unmerge", cleanlist,
14073                         ldpath_mtimes, ordered=ordered)
14074
14075         if action == "prune":
14076                 return
14077
14078         if not cleanlist and "--quiet" in myopts:
14079                 return
14080
14081         print "Packages installed:   "+str(len(vardb.cpv_all()))
14082         print "Packages in world:    " + \
14083                 str(len(root_config.sets["world"].getAtoms()))
14084         print "Packages in system:   " + \
14085                 str(len(root_config.sets["system"].getAtoms()))
14086         print "Required packages:    "+str(required_pkgs_total)
14087         if "--pretend" in myopts:
14088                 print "Number to remove:     "+str(len(cleanlist))
14089         else:
14090                 print "Number removed:       "+str(len(cleanlist))
14091
14092 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14093         """
14094         Construct a depgraph for the given resume list. This will raise
14095         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14096         @rtype: tuple
14097         @returns: (success, depgraph, dropped_tasks)
14098         """
14099         skip_masked = True
14100         skip_unsatisfied = True
14101         mergelist = mtimedb["resume"]["mergelist"]
14102         dropped_tasks = set()
14103         while True:
14104                 mydepgraph = depgraph(settings, trees,
14105                         myopts, myparams, spinner)
14106                 try:
14107                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14108                                 skip_masked=skip_masked)
14109                 except depgraph.UnsatisfiedResumeDep, e:
14110                         if not skip_unsatisfied:
14111                                 raise
14112
14113                         graph = mydepgraph.digraph
14114                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14115                                 for dep in e.value)
14116                         traversed_nodes = set()
14117                         unsatisfied_stack = list(unsatisfied_parents)
14118                         while unsatisfied_stack:
14119                                 pkg = unsatisfied_stack.pop()
14120                                 if pkg in traversed_nodes:
14121                                         continue
14122                                 traversed_nodes.add(pkg)
14123
14124                                 # If this package was pulled in by a parent
14125                                 # package scheduled for merge, removing this
14126                                 # package may cause the the parent package's
14127                                 # dependency to become unsatisfied.
14128                                 for parent_node in graph.parent_nodes(pkg):
14129                                         if not isinstance(parent_node, Package) \
14130                                                 or parent_node.operation not in ("merge", "nomerge"):
14131                                                 continue
14132                                         unsatisfied = \
14133                                                 graph.child_nodes(parent_node,
14134                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14135                                         if pkg in unsatisfied:
14136                                                 unsatisfied_parents[parent_node] = parent_node
14137                                                 unsatisfied_stack.append(parent_node)
14138
14139                         pruned_mergelist = []
14140                         for x in mergelist:
14141                                 if isinstance(x, list) and \
14142                                         tuple(x) not in unsatisfied_parents:
14143                                         pruned_mergelist.append(x)
14144
14145                         # If the mergelist doesn't shrink then this loop is infinite.
14146                         if len(pruned_mergelist) == len(mergelist):
14147                                 # This happens if a package can't be dropped because
14148                                 # it's already installed, but it has unsatisfied PDEPEND.
14149                                 raise
14150                         mergelist[:] = pruned_mergelist
14151
14152                         # Exclude installed packages that have been removed from the graph due
14153                         # to failure to build/install runtime dependencies after the dependent
14154                         # package has already been installed.
14155                         dropped_tasks.update(pkg for pkg in \
14156                                 unsatisfied_parents if pkg.operation != "nomerge")
14157                         mydepgraph.break_refs(unsatisfied_parents)
14158
14159                         del e, graph, traversed_nodes, \
14160                                 unsatisfied_parents, unsatisfied_stack
14161                         continue
14162                 else:
14163                         break
14164         return (success, mydepgraph, dropped_tasks)
14165
14166 def action_build(settings, trees, mtimedb,
14167         myopts, myaction, myfiles, spinner):
14168
14169         # validate the state of the resume data
14170         # so that we can make assumptions later.
14171         for k in ("resume", "resume_backup"):
14172                 if k not in mtimedb:
14173                         continue
14174                 resume_data = mtimedb[k]
14175                 if not isinstance(resume_data, dict):
14176                         del mtimedb[k]
14177                         continue
14178                 mergelist = resume_data.get("mergelist")
14179                 if not isinstance(mergelist, list):
14180                         del mtimedb[k]
14181                         continue
14182                 for x in mergelist:
14183                         if not (isinstance(x, list) and len(x) == 4):
14184                                 continue
14185                         pkg_type, pkg_root, pkg_key, pkg_action = x
14186                         if pkg_root not in trees:
14187                                 # Current $ROOT setting differs,
14188                                 # so the list must be stale.
14189                                 mergelist = None
14190                                 break
14191                 if not mergelist:
14192                         del mtimedb[k]
14193                         continue
14194                 resume_opts = resume_data.get("myopts")
14195                 if not isinstance(resume_opts, (dict, list)):
14196                         del mtimedb[k]
14197                         continue
14198                 favorites = resume_data.get("favorites")
14199                 if not isinstance(favorites, list):
14200                         del mtimedb[k]
14201                         continue
14202
14203         resume = False
14204         if "--resume" in myopts and \
14205                 ("resume" in mtimedb or
14206                 "resume_backup" in mtimedb):
14207                 resume = True
14208                 if "resume" not in mtimedb:
14209                         mtimedb["resume"] = mtimedb["resume_backup"]
14210                         del mtimedb["resume_backup"]
14211                         mtimedb.commit()
14212                 # "myopts" is a list for backward compatibility.
14213                 resume_opts = mtimedb["resume"].get("myopts", [])
14214                 if isinstance(resume_opts, list):
14215                         resume_opts = dict((k,True) for k in resume_opts)
14216                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14217                         resume_opts.pop(opt, None)
14218                 myopts.update(resume_opts)
14219
14220                 if "--debug" in myopts:
14221                         writemsg_level("myopts %s\n" % (myopts,))
14222
14223                 # Adjust config according to options of the command being resumed.
14224                 for myroot in trees:
14225                         mysettings =  trees[myroot]["vartree"].settings
14226                         mysettings.unlock()
14227                         adjust_config(myopts, mysettings)
14228                         mysettings.lock()
14229                         del myroot, mysettings
14230
14231         ldpath_mtimes = mtimedb["ldpath"]
14232         favorites=[]
14233         merge_count = 0
14234         buildpkgonly = "--buildpkgonly" in myopts
14235         pretend = "--pretend" in myopts
14236         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14237         ask = "--ask" in myopts
14238         nodeps = "--nodeps" in myopts
14239         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14240         tree = "--tree" in myopts
14241         if nodeps and tree:
14242                 tree = False
14243                 del myopts["--tree"]
14244                 portage.writemsg(colorize("WARN", " * ") + \
14245                         "--tree is broken with --nodeps. Disabling...\n")
14246         debug = "--debug" in myopts
14247         verbose = "--verbose" in myopts
14248         quiet = "--quiet" in myopts
14249         if pretend or fetchonly:
14250                 # make the mtimedb readonly
14251                 mtimedb.filename = None
14252         if "--digest" in myopts:
14253                 msg = "The --digest option can prevent corruption from being" + \
14254                         " noticed. The `repoman manifest` command is the preferred" + \
14255                         " way to generate manifests and it is capable of doing an" + \
14256                         " entire repository or category at once."
14257                 prefix = bad(" * ")
14258                 writemsg(prefix + "\n")
14259                 from textwrap import wrap
14260                 for line in wrap(msg, 72):
14261                         writemsg("%s%s\n" % (prefix, line))
14262                 writemsg(prefix + "\n")
14263
14264         if "--quiet" not in myopts and \
14265                 ("--pretend" in myopts or "--ask" in myopts or \
14266                 "--tree" in myopts or "--verbose" in myopts):
14267                 action = ""
14268                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14269                         action = "fetched"
14270                 elif "--buildpkgonly" in myopts:
14271                         action = "built"
14272                 else:
14273                         action = "merged"
14274                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14275                         print
14276                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14277                         print
14278                 else:
14279                         print
14280                         print darkgreen("These are the packages that would be %s, in order:") % action
14281                         print
14282
14283         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14284         if not show_spinner:
14285                 spinner.update = spinner.update_quiet
14286
14287         if resume:
14288                 favorites = mtimedb["resume"].get("favorites")
14289                 if not isinstance(favorites, list):
14290                         favorites = []
14291
14292                 if show_spinner:
14293                         print "Calculating dependencies  ",
14294                 myparams = create_depgraph_params(myopts, myaction)
14295
14296                 resume_data = mtimedb["resume"]
14297                 mergelist = resume_data["mergelist"]
14298                 if mergelist and "--skipfirst" in myopts:
14299                         for i, task in enumerate(mergelist):
14300                                 if isinstance(task, list) and \
14301                                         task and task[-1] == "merge":
14302                                         del mergelist[i]
14303                                         break
14304
14305                 success = False
14306                 mydepgraph = None
14307                 try:
14308                         success, mydepgraph, dropped_tasks = resume_depgraph(
14309                                 settings, trees, mtimedb, myopts, myparams, spinner)
14310                 except (portage.exception.PackageNotFound,
14311                         depgraph.UnsatisfiedResumeDep), e:
14312                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14313                                 mydepgraph = e.depgraph
14314                         if show_spinner:
14315                                 print
14316                         from textwrap import wrap
14317                         from portage.output import EOutput
14318                         out = EOutput()
14319
14320                         resume_data = mtimedb["resume"]
14321                         mergelist = resume_data.get("mergelist")
14322                         if not isinstance(mergelist, list):
14323                                 mergelist = []
14324                         if mergelist and debug or (verbose and not quiet):
14325                                 out.eerror("Invalid resume list:")
14326                                 out.eerror("")
14327                                 indent = "  "
14328                                 for task in mergelist:
14329                                         if isinstance(task, list):
14330                                                 out.eerror(indent + str(tuple(task)))
14331                                 out.eerror("")
14332
14333                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14334                                 out.eerror("One or more packages are either masked or " + \
14335                                         "have missing dependencies:")
14336                                 out.eerror("")
14337                                 indent = "  "
14338                                 for dep in e.value:
14339                                         if dep.atom is None:
14340                                                 out.eerror(indent + "Masked package:")
14341                                                 out.eerror(2 * indent + str(dep.parent))
14342                                                 out.eerror("")
14343                                         else:
14344                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14345                                                 out.eerror(2 * indent + str(dep.parent))
14346                                                 out.eerror("")
14347                                 msg = "The resume list contains packages " + \
14348                                         "that are either masked or have " + \
14349                                         "unsatisfied dependencies. " + \
14350                                         "Please restart/continue " + \
14351                                         "the operation manually, or use --skipfirst " + \
14352                                         "to skip the first package in the list and " + \
14353                                         "any other packages that may be " + \
14354                                         "masked or have missing dependencies."
14355                                 for line in wrap(msg, 72):
14356                                         out.eerror(line)
14357                         elif isinstance(e, portage.exception.PackageNotFound):
14358                                 out.eerror("An expected package is " + \
14359                                         "not available: %s" % str(e))
14360                                 out.eerror("")
14361                                 msg = "The resume list contains one or more " + \
14362                                         "packages that are no longer " + \
14363                                         "available. Please restart/continue " + \
14364                                         "the operation manually."
14365                                 for line in wrap(msg, 72):
14366                                         out.eerror(line)
14367                 else:
14368                         if show_spinner:
14369                                 print "\b\b... done!"
14370
14371                 if success:
14372                         if dropped_tasks:
14373                                 portage.writemsg("!!! One or more packages have been " + \
14374                                         "dropped due to\n" + \
14375                                         "!!! masking or unsatisfied dependencies:\n\n",
14376                                         noiselevel=-1)
14377                                 for task in dropped_tasks:
14378                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14379                                 portage.writemsg("\n", noiselevel=-1)
14380                         del dropped_tasks
14381                 else:
14382                         if mydepgraph is not None:
14383                                 mydepgraph.display_problems()
14384                         if not (ask or pretend):
14385                                 # delete the current list and also the backup
14386                                 # since it's probably stale too.
14387                                 for k in ("resume", "resume_backup"):
14388                                         mtimedb.pop(k, None)
14389                                 mtimedb.commit()
14390
14391                         return 1
14392         else:
14393                 if ("--resume" in myopts):
14394                         print darkgreen("emerge: It seems we have nothing to resume...")
14395                         return os.EX_OK
14396
14397                 myparams = create_depgraph_params(myopts, myaction)
14398                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14399                         print "Calculating dependencies  ",
14400                         sys.stdout.flush()
14401                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14402                 try:
14403                         retval, favorites = mydepgraph.select_files(myfiles)
14404                 except portage.exception.PackageNotFound, e:
14405                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14406                         return 1
14407                 except portage.exception.PackageSetNotFound, e:
14408                         root_config = trees[settings["ROOT"]]["root_config"]
14409                         display_missing_pkg_set(root_config, e.value)
14410                         return 1
14411                 if show_spinner:
14412                         print "\b\b... done!"
14413                 if not retval:
14414                         mydepgraph.display_problems()
14415                         return 1
14416
14417         if "--pretend" not in myopts and \
14418                 ("--ask" in myopts or "--tree" in myopts or \
14419                 "--verbose" in myopts) and \
14420                 not ("--quiet" in myopts and "--ask" not in myopts):
14421                 if "--resume" in myopts:
14422                         mymergelist = mydepgraph.altlist()
14423                         if len(mymergelist) == 0:
14424                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14425                                 return os.EX_OK
14426                         favorites = mtimedb["resume"]["favorites"]
14427                         retval = mydepgraph.display(
14428                                 mydepgraph.altlist(reversed=tree),
14429                                 favorites=favorites)
14430                         mydepgraph.display_problems()
14431                         if retval != os.EX_OK:
14432                                 return retval
14433                         prompt="Would you like to resume merging these packages?"
14434                 else:
14435                         retval = mydepgraph.display(
14436                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14437                                 favorites=favorites)
14438                         mydepgraph.display_problems()
14439                         if retval != os.EX_OK:
14440                                 return retval
14441                         mergecount=0
14442                         for x in mydepgraph.altlist():
14443                                 if isinstance(x, Package) and x.operation == "merge":
14444                                         mergecount += 1
14445
14446                         if mergecount==0:
14447                                 sets = trees[settings["ROOT"]]["root_config"].sets
14448                                 world_candidates = None
14449                                 if "--noreplace" in myopts and \
14450                                         not oneshot and favorites:
14451                                         # Sets that are not world candidates are filtered
14452                                         # out here since the favorites list needs to be
14453                                         # complete for depgraph.loadResumeCommand() to
14454                                         # operate correctly.
14455                                         world_candidates = [x for x in favorites \
14456                                                 if not (x.startswith(SETPREFIX) and \
14457                                                 not sets[x[1:]].world_candidate)]
14458                                 if "--noreplace" in myopts and \
14459                                         not oneshot and world_candidates:
14460                                         print
14461                                         for x in world_candidates:
14462                                                 print " %s %s" % (good("*"), x)
14463                                         prompt="Would you like to add these packages to your world favorites?"
14464                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14465                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14466                                 else:
14467                                         print
14468                                         print "Nothing to merge; quitting."
14469                                         print
14470                                         return os.EX_OK
14471                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14472                                 prompt="Would you like to fetch the source files for these packages?"
14473                         else:
14474                                 prompt="Would you like to merge these packages?"
14475                 print
14476                 if "--ask" in myopts and userquery(prompt) == "No":
14477                         print
14478                         print "Quitting."
14479                         print
14480                         return os.EX_OK
14481                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14482                 myopts.pop("--ask", None)
14483
14484         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14485                 if ("--resume" in myopts):
14486                         mymergelist = mydepgraph.altlist()
14487                         if len(mymergelist) == 0:
14488                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14489                                 return os.EX_OK
14490                         favorites = mtimedb["resume"]["favorites"]
14491                         retval = mydepgraph.display(
14492                                 mydepgraph.altlist(reversed=tree),
14493                                 favorites=favorites)
14494                         mydepgraph.display_problems()
14495                         if retval != os.EX_OK:
14496                                 return retval
14497                 else:
14498                         retval = mydepgraph.display(
14499                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14500                                 favorites=favorites)
14501                         mydepgraph.display_problems()
14502                         if retval != os.EX_OK:
14503                                 return retval
14504                         if "--buildpkgonly" in myopts:
14505                                 graph_copy = mydepgraph.digraph.clone()
14506                                 removed_nodes = set()
14507                                 for node in list(graph_copy.order):
14508                                         if not isinstance(node, Package) or \
14509                                                 node.operation == "nomerge":
14510                                                 removed_nodes.add(node)
14511                                 graph_copy.difference_update(removed_nodes)
14512                                 if not graph_copy.hasallzeros(ignore_priority = \
14513                                         DepPrioritySatisfiedRange.ignore_medium):
14514                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14515                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14516                                         return 1
14517         else:
14518                 if "--buildpkgonly" in myopts:
14519                         graph_copy = mydepgraph.digraph.clone()
14520                         removed_nodes = set()
14521                         for node in list(graph_copy.order):
14522                                 if not isinstance(node, Package) or \
14523                                         node.operation == "nomerge":
14524                                         removed_nodes.add(node)
14525                         graph_copy.difference_update(removed_nodes)
14526                         if not graph_copy.hasallzeros(ignore_priority = \
14527                                 DepPrioritySatisfiedRange.ignore_medium):
14528                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14529                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14530                                 return 1
14531
14532                 if ("--resume" in myopts):
14533                         favorites=mtimedb["resume"]["favorites"]
14534                         mymergelist = mydepgraph.altlist()
14535                         mydepgraph.break_refs(mymergelist)
14536                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14537                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14538                         del mydepgraph, mymergelist
14539                         clear_caches(trees)
14540
14541                         retval = mergetask.merge()
14542                         merge_count = mergetask.curval
14543                 else:
14544                         if "resume" in mtimedb and \
14545                         "mergelist" in mtimedb["resume"] and \
14546                         len(mtimedb["resume"]["mergelist"]) > 1:
14547                                 mtimedb["resume_backup"] = mtimedb["resume"]
14548                                 del mtimedb["resume"]
14549                                 mtimedb.commit()
14550                         mtimedb["resume"]={}
14551                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14552                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14553                         # a list type for options.
14554                         mtimedb["resume"]["myopts"] = myopts.copy()
14555
14556                         # Convert Atom instances to plain str.
14557                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14558
14559                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14560                                 for pkgline in mydepgraph.altlist():
14561                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14562                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14563                                                 tmpsettings = portage.config(clone=settings)
14564                                                 edebug = 0
14565                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
14566                                                         edebug = 1
14567                                                 retval = portage.doebuild(
14568                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
14569                                                         ("--pretend" in myopts),
14570                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14571                                                         tree="porttree")
14572
14573                         pkglist = mydepgraph.altlist()
14574                         mydepgraph.saveNomergeFavorites()
14575                         mydepgraph.break_refs(pkglist)
14576                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14577                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14578                         del mydepgraph, pkglist
14579                         clear_caches(trees)
14580
14581                         retval = mergetask.merge()
14582                         merge_count = mergetask.curval
14583
14584                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14585                         if "yes" == settings.get("AUTOCLEAN"):
14586                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14587                                 unmerge(trees[settings["ROOT"]]["root_config"],
14588                                         myopts, "clean", [],
14589                                         ldpath_mtimes, autoclean=1)
14590                         else:
14591                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14592                                         + " AUTOCLEAN is disabled.  This can cause serious"
14593                                         + " problems due to overlapping packages.\n")
14594                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14595
14596                 return retval
14597
14598 def multiple_actions(action1, action2):
14599         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14600         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14601         sys.exit(1)
14602
14603 def insert_optional_args(args):
14604         """
14605         Parse optional arguments and insert a value if one has
14606         not been provided. This is done before feeding the args
14607         to the optparse parser since that parser does not support
14608         this feature natively.
14609         """
14610
14611         new_args = []
14612         jobs_opts = ("-j", "--jobs")
14613         arg_stack = args[:]
14614         arg_stack.reverse()
14615         while arg_stack:
14616                 arg = arg_stack.pop()
14617
14618                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14619                 if not (short_job_opt or arg in jobs_opts):
14620                         new_args.append(arg)
14621                         continue
14622
14623                 # Insert an empty placeholder in order to
14624                 # satisfy the requirements of optparse.
14625
14626                 new_args.append("--jobs")
14627                 job_count = None
14628                 saved_opts = None
14629                 if short_job_opt and len(arg) > 2:
14630                         if arg[:2] == "-j":
14631                                 try:
14632                                         job_count = int(arg[2:])
14633                                 except ValueError:
14634                                         saved_opts = arg[2:]
14635                         else:
14636                                 job_count = "True"
14637                                 saved_opts = arg[1:].replace("j", "")
14638
14639                 if job_count is None and arg_stack:
14640                         try:
14641                                 job_count = int(arg_stack[-1])
14642                         except ValueError:
14643                                 pass
14644                         else:
14645                                 # Discard the job count from the stack
14646                                 # since we're consuming it here.
14647                                 arg_stack.pop()
14648
14649                 if job_count is None:
14650                         # unlimited number of jobs
14651                         new_args.append("True")
14652                 else:
14653                         new_args.append(str(job_count))
14654
14655                 if saved_opts is not None:
14656                         new_args.append("-" + saved_opts)
14657
14658         return new_args
14659
14660 def parse_opts(tmpcmdline, silent=False):
14661         myaction=None
14662         myopts = {}
14663         myfiles=[]
14664
14665         global actions, options, shortmapping
14666
14667         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14668         argument_options = {
14669                 "--config-root": {
14670                         "help":"specify the location for portage configuration files",
14671                         "action":"store"
14672                 },
14673                 "--color": {
14674                         "help":"enable or disable color output",
14675                         "type":"choice",
14676                         "choices":("y", "n")
14677                 },
14678
14679                 "--jobs": {
14680
14681                         "help"   : "Specifies the number of packages to build " + \
14682                                 "simultaneously.",
14683
14684                         "action" : "store"
14685                 },
14686
14687                 "--load-average": {
14688
14689                         "help"   :"Specifies that no new builds should be started " + \
14690                                 "if there are other builds running and the load average " + \
14691                                 "is at least LOAD (a floating-point number).",
14692
14693                         "action" : "store"
14694                 },
14695
14696                 "--with-bdeps": {
14697                         "help":"include unnecessary build time dependencies",
14698                         "type":"choice",
14699                         "choices":("y", "n")
14700                 },
14701                 "--reinstall": {
14702                         "help":"specify conditions to trigger package reinstallation",
14703                         "type":"choice",
14704                         "choices":["changed-use"]
14705                 }
14706         }
14707
14708         from optparse import OptionParser
14709         parser = OptionParser()
14710         if parser.has_option("--help"):
14711                 parser.remove_option("--help")
14712
14713         for action_opt in actions:
14714                 parser.add_option("--" + action_opt, action="store_true",
14715                         dest=action_opt.replace("-", "_"), default=False)
14716         for myopt in options:
14717                 parser.add_option(myopt, action="store_true",
14718                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14719         for shortopt, longopt in shortmapping.iteritems():
14720                 parser.add_option("-" + shortopt, action="store_true",
14721                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14722         for myalias, myopt in longopt_aliases.iteritems():
14723                 parser.add_option(myalias, action="store_true",
14724                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14725
14726         for myopt, kwargs in argument_options.iteritems():
14727                 parser.add_option(myopt,
14728                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14729
14730         tmpcmdline = insert_optional_args(tmpcmdline)
14731
14732         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14733
14734         if myoptions.jobs:
14735                 jobs = None
14736                 if myoptions.jobs == "True":
14737                         jobs = True
14738                 else:
14739                         try:
14740                                 jobs = int(myoptions.jobs)
14741                         except ValueError:
14742                                 jobs = -1
14743
14744                 if jobs is not True and \
14745                         jobs < 1:
14746                         jobs = None
14747                         if not silent:
14748                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14749                                         (myoptions.jobs,), noiselevel=-1)
14750
14751                 myoptions.jobs = jobs
14752
14753         if myoptions.load_average:
14754                 try:
14755                         load_average = float(myoptions.load_average)
14756                 except ValueError:
14757                         load_average = 0.0
14758
14759                 if load_average <= 0.0:
14760                         load_average = None
14761                         if not silent:
14762                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14763                                         (myoptions.load_average,), noiselevel=-1)
14764
14765                 myoptions.load_average = load_average
14766
14767         for myopt in options:
14768                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14769                 if v:
14770                         myopts[myopt] = True
14771
14772         for myopt in argument_options:
14773                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14774                 if v is not None:
14775                         myopts[myopt] = v
14776
14777         for action_opt in actions:
14778                 v = getattr(myoptions, action_opt.replace("-", "_"))
14779                 if v:
14780                         if myaction:
14781                                 multiple_actions(myaction, action_opt)
14782                                 sys.exit(1)
14783                         myaction = action_opt
14784
14785         myfiles += myargs
14786
14787         return myaction, myopts, myfiles
14788
14789 def validate_ebuild_environment(trees):
14790         for myroot in trees:
14791                 settings = trees[myroot]["vartree"].settings
14792                 settings.validate()
14793
14794 def clear_caches(trees):
14795         for d in trees.itervalues():
14796                 d["porttree"].dbapi.melt()
14797                 d["porttree"].dbapi._aux_cache.clear()
14798                 d["bintree"].dbapi._aux_cache.clear()
14799                 d["bintree"].dbapi._clear_cache()
14800                 d["vartree"].dbapi.linkmap._clear_cache()
14801         portage.dircache.clear()
14802         gc.collect()
14803
14804 def load_emerge_config(trees=None):
14805         kwargs = {}
14806         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14807                 v = os.environ.get(envvar, None)
14808                 if v and v.strip():
14809                         kwargs[k] = v
14810         trees = portage.create_trees(trees=trees, **kwargs)
14811
14812         for root, root_trees in trees.iteritems():
14813                 settings = root_trees["vartree"].settings
14814                 setconfig = load_default_config(settings, root_trees)
14815                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14816
14817         settings = trees["/"]["vartree"].settings
14818
14819         for myroot in trees:
14820                 if myroot != "/":
14821                         settings = trees[myroot]["vartree"].settings
14822                         break
14823
14824         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14825         mtimedb = portage.MtimeDB(mtimedbfile)
14826         
14827         return settings, trees, mtimedb
14828
14829 def adjust_config(myopts, settings):
14830         """Make emerge specific adjustments to the config."""
14831
14832         # To enhance usability, make some vars case insensitive by forcing them to
14833         # lower case.
14834         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14835                 if myvar in settings:
14836                         settings[myvar] = settings[myvar].lower()
14837                         settings.backup_changes(myvar)
14838         del myvar
14839
14840         # Kill noauto as it will break merges otherwise.
14841         if "noauto" in settings.features:
14842                 while "noauto" in settings.features:
14843                         settings.features.remove("noauto")
14844                 settings["FEATURES"] = " ".join(settings.features)
14845                 settings.backup_changes("FEATURES")
14846
14847         CLEAN_DELAY = 5
14848         try:
14849                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14850         except ValueError, e:
14851                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14852                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14853                         settings["CLEAN_DELAY"], noiselevel=-1)
14854         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14855         settings.backup_changes("CLEAN_DELAY")
14856
14857         EMERGE_WARNING_DELAY = 10
14858         try:
14859                 EMERGE_WARNING_DELAY = int(settings.get(
14860                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14861         except ValueError, e:
14862                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14863                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14864                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14865         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14866         settings.backup_changes("EMERGE_WARNING_DELAY")
14867
14868         if "--quiet" in myopts:
14869                 settings["PORTAGE_QUIET"]="1"
14870                 settings.backup_changes("PORTAGE_QUIET")
14871
14872         if "--verbose" in myopts:
14873                 settings["PORTAGE_VERBOSE"] = "1"
14874                 settings.backup_changes("PORTAGE_VERBOSE")
14875
14876         # Set so that configs will be merged regardless of remembered status
14877         if ("--noconfmem" in myopts):
14878                 settings["NOCONFMEM"]="1"
14879                 settings.backup_changes("NOCONFMEM")
14880
14881         # Set various debug markers... They should be merged somehow.
14882         PORTAGE_DEBUG = 0
14883         try:
14884                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14885                 if PORTAGE_DEBUG not in (0, 1):
14886                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14887                                 PORTAGE_DEBUG, noiselevel=-1)
14888                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14889                                 noiselevel=-1)
14890                         PORTAGE_DEBUG = 0
14891         except ValueError, e:
14892                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14893                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14894                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14895                 del e
14896         if "--debug" in myopts:
14897                 PORTAGE_DEBUG = 1
14898         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14899         settings.backup_changes("PORTAGE_DEBUG")
14900
14901         if settings.get("NOCOLOR") not in ("yes","true"):
14902                 portage.output.havecolor = 1
14903
14904         """The explicit --color < y | n > option overrides the NOCOLOR environment
14905         variable and stdout auto-detection."""
14906         if "--color" in myopts:
14907                 if "y" == myopts["--color"]:
14908                         portage.output.havecolor = 1
14909                         settings["NOCOLOR"] = "false"
14910                 else:
14911                         portage.output.havecolor = 0
14912                         settings["NOCOLOR"] = "true"
14913                 settings.backup_changes("NOCOLOR")
14914         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14915                 portage.output.havecolor = 0
14916                 settings["NOCOLOR"] = "true"
14917                 settings.backup_changes("NOCOLOR")
14918
14919 def apply_priorities(settings):
14920         ionice(settings)
14921         nice(settings)
14922
14923 def nice(settings):
14924         try:
14925                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14926         except (OSError, ValueError), e:
14927                 out = portage.output.EOutput()
14928                 out.eerror("Failed to change nice value to '%s'" % \
14929                         settings["PORTAGE_NICENESS"])
14930                 out.eerror("%s\n" % str(e))
14931
14932 def ionice(settings):
14933
14934         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14935         if ionice_cmd:
14936                 ionice_cmd = shlex.split(ionice_cmd)
14937         if not ionice_cmd:
14938                 return
14939
14940         from portage.util import varexpand
14941         variables = {"PID" : str(os.getpid())}
14942         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14943
14944         try:
14945                 rval = portage.process.spawn(cmd, env=os.environ)
14946         except portage.exception.CommandNotFound:
14947                 # The OS kernel probably doesn't support ionice,
14948                 # so return silently.
14949                 return
14950
14951         if rval != os.EX_OK:
14952                 out = portage.output.EOutput()
14953                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14954                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14955
14956 def display_missing_pkg_set(root_config, set_name):
14957
14958         msg = []
14959         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14960                 "The following sets exist:") % \
14961                 colorize("INFORM", set_name))
14962         msg.append("")
14963
14964         for s in sorted(root_config.sets):
14965                 msg.append("    %s" % s)
14966         msg.append("")
14967
14968         writemsg_level("".join("%s\n" % l for l in msg),
14969                 level=logging.ERROR, noiselevel=-1)
14970
14971 def expand_set_arguments(myfiles, myaction, root_config):
14972         retval = os.EX_OK
14973         setconfig = root_config.setconfig
14974
14975         sets = setconfig.getSets()
14976
14977         # In order to know exactly which atoms/sets should be added to the
14978         # world file, the depgraph performs set expansion later. It will get
14979         # confused about where the atoms came from if it's not allowed to
14980         # expand them itself.
14981         do_not_expand = (None, )
14982         newargs = []
14983         for a in myfiles:
14984                 if a in ("system", "world"):
14985                         newargs.append(SETPREFIX+a)
14986                 else:
14987                         newargs.append(a)
14988         myfiles = newargs
14989         del newargs
14990         newargs = []
14991
14992         # separators for set arguments
14993         ARG_START = "{"
14994         ARG_END = "}"
14995
14996         # WARNING: all operators must be of equal length
14997         IS_OPERATOR = "/@"
14998         DIFF_OPERATOR = "-@"
14999         UNION_OPERATOR = "+@"
15000         
15001         for i in range(0, len(myfiles)):
15002                 if myfiles[i].startswith(SETPREFIX):
15003                         start = 0
15004                         end = 0
15005                         x = myfiles[i][len(SETPREFIX):]
15006                         newset = ""
15007                         while x:
15008                                 start = x.find(ARG_START)
15009                                 end = x.find(ARG_END)
15010                                 if start > 0 and start < end:
15011                                         namepart = x[:start]
15012                                         argpart = x[start+1:end]
15013                                 
15014                                         # TODO: implement proper quoting
15015                                         args = argpart.split(",")
15016                                         options = {}
15017                                         for a in args:
15018                                                 if "=" in a:
15019                                                         k, v  = a.split("=", 1)
15020                                                         options[k] = v
15021                                                 else:
15022                                                         options[a] = "True"
15023                                         setconfig.update(namepart, options)
15024                                         newset += (x[:start-len(namepart)]+namepart)
15025                                         x = x[end+len(ARG_END):]
15026                                 else:
15027                                         newset += x
15028                                         x = ""
15029                         myfiles[i] = SETPREFIX+newset
15030                                 
15031         sets = setconfig.getSets()
15032
15033         # display errors that occured while loading the SetConfig instance
15034         for e in setconfig.errors:
15035                 print colorize("BAD", "Error during set creation: %s" % e)
15036         
15037         # emerge relies on the existance of sets with names "world" and "system"
15038         required_sets = ("world", "system")
15039         missing_sets = []
15040
15041         for s in required_sets:
15042                 if s not in sets:
15043                         missing_sets.append(s)
15044         if missing_sets:
15045                 if len(missing_sets) > 2:
15046                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15047                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15048                 elif len(missing_sets) == 2:
15049                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15050                 else:
15051                         missing_sets_str = '"%s"' % missing_sets[-1]
15052                 msg = ["emerge: incomplete set configuration, " + \
15053                         "missing set(s): %s" % missing_sets_str]
15054                 if sets:
15055                         msg.append("        sets defined: %s" % ", ".join(sets))
15056                 msg.append("        This usually means that '%s'" % \
15057                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15058                 msg.append("        is missing or corrupt.")
15059                 for line in msg:
15060                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15061                 return (None, 1)
15062         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15063
15064         for a in myfiles:
15065                 if a.startswith(SETPREFIX):
15066                         # support simple set operations (intersection, difference and union)
15067                         # on the commandline. Expressions are evaluated strictly left-to-right
15068                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15069                                 expression = a[len(SETPREFIX):]
15070                                 expr_sets = []
15071                                 expr_ops = []
15072                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15073                                         is_pos = expression.rfind(IS_OPERATOR)
15074                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15075                                         union_pos = expression.rfind(UNION_OPERATOR)
15076                                         op_pos = max(is_pos, diff_pos, union_pos)
15077                                         s1 = expression[:op_pos]
15078                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15079                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15080                                         if not s2 in sets:
15081                                                 display_missing_pkg_set(root_config, s2)
15082                                                 return (None, 1)
15083                                         expr_sets.insert(0, s2)
15084                                         expr_ops.insert(0, op)
15085                                         expression = s1
15086                                 if not expression in sets:
15087                                         display_missing_pkg_set(root_config, expression)
15088                                         return (None, 1)
15089                                 expr_sets.insert(0, expression)
15090                                 result = set(setconfig.getSetAtoms(expression))
15091                                 for i in range(0, len(expr_ops)):
15092                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15093                                         if expr_ops[i] == IS_OPERATOR:
15094                                                 result.intersection_update(s2)
15095                                         elif expr_ops[i] == DIFF_OPERATOR:
15096                                                 result.difference_update(s2)
15097                                         elif expr_ops[i] == UNION_OPERATOR:
15098                                                 result.update(s2)
15099                                         else:
15100                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15101                                 newargs.extend(result)
15102                         else:                   
15103                                 s = a[len(SETPREFIX):]
15104                                 if s not in sets:
15105                                         display_missing_pkg_set(root_config, s)
15106                                         return (None, 1)
15107                                 setconfig.active.append(s)
15108                                 try:
15109                                         set_atoms = setconfig.getSetAtoms(s)
15110                                 except portage.exception.PackageSetNotFound, e:
15111                                         writemsg_level(("emerge: the given set '%s' " + \
15112                                                 "contains a non-existent set named '%s'.\n") % \
15113                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15114                                         return (None, 1)
15115                                 if myaction in unmerge_actions and \
15116                                                 not sets[s].supportsOperation("unmerge"):
15117                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15118                                                 "not support unmerge operations\n")
15119                                         retval = 1
15120                                 elif not set_atoms:
15121                                         print "emerge: '%s' is an empty set" % s
15122                                 elif myaction not in do_not_expand:
15123                                         newargs.extend(set_atoms)
15124                                 else:
15125                                         newargs.append(SETPREFIX+s)
15126                                 for e in sets[s].errors:
15127                                         print e
15128                 else:
15129                         newargs.append(a)
15130         return (newargs, retval)
15131
15132 def repo_name_check(trees):
15133         missing_repo_names = set()
15134         for root, root_trees in trees.iteritems():
15135                 if "porttree" in root_trees:
15136                         portdb = root_trees["porttree"].dbapi
15137                         missing_repo_names.update(portdb.porttrees)
15138                         repos = portdb.getRepositories()
15139                         for r in repos:
15140                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15141                         if portdb.porttree_root in missing_repo_names and \
15142                                 not os.path.exists(os.path.join(
15143                                 portdb.porttree_root, "profiles")):
15144                                 # This is normal if $PORTDIR happens to be empty,
15145                                 # so don't warn about it.
15146                                 missing_repo_names.remove(portdb.porttree_root)
15147
15148         if missing_repo_names:
15149                 msg = []
15150                 msg.append("WARNING: One or more repositories " + \
15151                         "have missing repo_name entries:")
15152                 msg.append("")
15153                 for p in missing_repo_names:
15154                         msg.append("\t%s/profiles/repo_name" % (p,))
15155                 msg.append("")
15156                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15157                         "should be a plain text file containing a unique " + \
15158                         "name for the repository on the first line.", 70))
15159                 writemsg_level("".join("%s\n" % l for l in msg),
15160                         level=logging.WARNING, noiselevel=-1)
15161
15162         return bool(missing_repo_names)
15163
15164 def config_protect_check(trees):
15165         for root, root_trees in trees.iteritems():
15166                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15167                         msg = "!!! CONFIG_PROTECT is empty"
15168                         if root != "/":
15169                                 msg += " for '%s'" % root
15170                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15171
15172 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15173
15174         if "--quiet" in myopts:
15175                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15176                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15177                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15178                         print "    " + colorize("INFORM", cp)
15179                 return
15180
15181         s = search(root_config, spinner, "--searchdesc" in myopts,
15182                 "--quiet" not in myopts, "--usepkg" in myopts,
15183                 "--usepkgonly" in myopts)
15184         null_cp = portage.dep_getkey(insert_category_into_atom(
15185                 arg, "null"))
15186         cat, atom_pn = portage.catsplit(null_cp)
15187         s.searchkey = atom_pn
15188         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15189                 s.addCP(cp)
15190         s.output()
15191         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15192         print "!!! one of the above fully-qualified ebuild names instead.\n"
15193
15194 def profile_check(trees, myaction, myopts):
15195         if myaction in ("info", "sync"):
15196                 return os.EX_OK
15197         elif "--version" in myopts or "--help" in myopts:
15198                 return os.EX_OK
15199         for root, root_trees in trees.iteritems():
15200                 if root_trees["root_config"].settings.profiles:
15201                         continue
15202                 # generate some profile related warning messages
15203                 validate_ebuild_environment(trees)
15204                 msg = "If you have just changed your profile configuration, you " + \
15205                         "should revert back to the previous configuration. Due to " + \
15206                         "your current profile being invalid, allowed actions are " + \
15207                         "limited to --help, --info, --sync, and --version."
15208                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15209                         level=logging.ERROR, noiselevel=-1)
15210                 return 1
15211         return os.EX_OK
15212
15213 def emerge_main():
15214         global portage  # NFC why this is necessary now - genone
15215         portage._disable_legacy_globals()
15216         # Disable color until we're sure that it should be enabled (after
15217         # EMERGE_DEFAULT_OPTS has been parsed).
15218         portage.output.havecolor = 0
15219         # This first pass is just for options that need to be known as early as
15220         # possible, such as --config-root.  They will be parsed again later,
15221         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15222         # the value of --config-root).
15223         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15224         if "--debug" in myopts:
15225                 os.environ["PORTAGE_DEBUG"] = "1"
15226         if "--config-root" in myopts:
15227                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15228
15229         # Portage needs to ensure a sane umask for the files it creates.
15230         os.umask(022)
15231         settings, trees, mtimedb = load_emerge_config()
15232         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15233         rval = profile_check(trees, myaction, myopts)
15234         if rval != os.EX_OK:
15235                 return rval
15236
15237         if portage._global_updates(trees, mtimedb["updates"]):
15238                 mtimedb.commit()
15239                 # Reload the whole config from scratch.
15240                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15241                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15242
15243         xterm_titles = "notitles" not in settings.features
15244
15245         tmpcmdline = []
15246         if "--ignore-default-opts" not in myopts:
15247                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15248         tmpcmdline.extend(sys.argv[1:])
15249         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15250
15251         if "--digest" in myopts:
15252                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15253                 # Reload the whole config from scratch so that the portdbapi internal
15254                 # config is updated with new FEATURES.
15255                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15256                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15257
15258         for myroot in trees:
15259                 mysettings =  trees[myroot]["vartree"].settings
15260                 mysettings.unlock()
15261                 adjust_config(myopts, mysettings)
15262                 if "--pretend" not in myopts:
15263                         mysettings["PORTAGE_COUNTER_HASH"] = \
15264                                 trees[myroot]["vartree"].dbapi._counter_hash()
15265                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15266                 mysettings.lock()
15267                 del myroot, mysettings
15268
15269         apply_priorities(settings)
15270
15271         spinner = stdout_spinner()
15272         if "candy" in settings.features:
15273                 spinner.update = spinner.update_scroll
15274
15275         if "--quiet" not in myopts:
15276                 portage.deprecated_profile_check(settings=settings)
15277                 repo_name_check(trees)
15278                 config_protect_check(trees)
15279
15280         eclasses_overridden = {}
15281         for mytrees in trees.itervalues():
15282                 mydb = mytrees["porttree"].dbapi
15283                 # Freeze the portdbapi for performance (memoize all xmatch results).
15284                 mydb.freeze()
15285                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15286         del mytrees, mydb
15287
15288         if eclasses_overridden and \
15289                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15290                 prefix = bad(" * ")
15291                 if len(eclasses_overridden) == 1:
15292                         writemsg(prefix + "Overlay eclass overrides " + \
15293                                 "eclass from PORTDIR:\n", noiselevel=-1)
15294                 else:
15295                         writemsg(prefix + "Overlay eclasses override " + \
15296                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15297                 writemsg(prefix + "\n", noiselevel=-1)
15298                 for eclass_name in sorted(eclasses_overridden):
15299                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15300                                 (eclasses_overridden[eclass_name], eclass_name),
15301                                 noiselevel=-1)
15302                 writemsg(prefix + "\n", noiselevel=-1)
15303                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15304                 "because it will trigger invalidation of cached ebuild metadata " + \
15305                 "that is distributed with the portage tree. If you must " + \
15306                 "override eclasses from PORTDIR then you are advised to add " + \
15307                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15308                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15309                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15310                 "you would like to disable this warning."
15311                 from textwrap import wrap
15312                 for line in wrap(msg, 72):
15313                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15314
15315         if "moo" in myfiles:
15316                 print """
15317
15318   Larry loves Gentoo (""" + platform.system() + """)
15319
15320  _______________________
15321 < Have you mooed today? >
15322  -----------------------
15323         \   ^__^
15324          \  (oo)\_______
15325             (__)\       )\/\ 
15326                 ||----w |
15327                 ||     ||
15328
15329 """
15330
15331         for x in myfiles:
15332                 ext = os.path.splitext(x)[1]
15333                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15334                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15335                         break
15336
15337         root_config = trees[settings["ROOT"]]["root_config"]
15338         if myaction == "list-sets":
15339                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15340                 sys.stdout.flush()
15341                 return os.EX_OK
15342
15343         # only expand sets for actions taking package arguments
15344         oldargs = myfiles[:]
15345         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15346                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15347                 if retval != os.EX_OK:
15348                         return retval
15349
15350                 # Need to handle empty sets specially, otherwise emerge will react 
15351                 # with the help message for empty argument lists
15352                 if oldargs and not myfiles:
15353                         print "emerge: no targets left after set expansion"
15354                         return 0
15355
15356         if ("--tree" in myopts) and ("--columns" in myopts):
15357                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15358                 return 1
15359
15360         if ("--quiet" in myopts):
15361                 spinner.update = spinner.update_quiet
15362                 portage.util.noiselimit = -1
15363
15364         # Always create packages if FEATURES=buildpkg
15365         # Imply --buildpkg if --buildpkgonly
15366         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15367                 if "--buildpkg" not in myopts:
15368                         myopts["--buildpkg"] = True
15369
15370         # Also allow -S to invoke search action (-sS)
15371         if ("--searchdesc" in myopts):
15372                 if myaction and myaction != "search":
15373                         myfiles.append(myaction)
15374                 if "--search" not in myopts:
15375                         myopts["--search"] = True
15376                 myaction = "search"
15377
15378         # Always try and fetch binary packages if FEATURES=getbinpkg
15379         if ("getbinpkg" in settings.features):
15380                 myopts["--getbinpkg"] = True
15381
15382         if "--buildpkgonly" in myopts:
15383                 # --buildpkgonly will not merge anything, so
15384                 # it cancels all binary package options.
15385                 for opt in ("--getbinpkg", "--getbinpkgonly",
15386                         "--usepkg", "--usepkgonly"):
15387                         myopts.pop(opt, None)
15388
15389         if "--fetch-all-uri" in myopts:
15390                 myopts["--fetchonly"] = True
15391
15392         if "--skipfirst" in myopts and "--resume" not in myopts:
15393                 myopts["--resume"] = True
15394
15395         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15396                 myopts["--usepkgonly"] = True
15397
15398         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15399                 myopts["--getbinpkg"] = True
15400
15401         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15402                 myopts["--usepkg"] = True
15403
15404         # Also allow -K to apply --usepkg/-k
15405         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15406                 myopts["--usepkg"] = True
15407
15408         # Allow -p to remove --ask
15409         if ("--pretend" in myopts) and ("--ask" in myopts):
15410                 print ">>> --pretend disables --ask... removing --ask from options."
15411                 del myopts["--ask"]
15412
15413         # forbid --ask when not in a terminal
15414         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15415         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15416                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15417                         noiselevel=-1)
15418                 return 1
15419
15420         if settings.get("PORTAGE_DEBUG", "") == "1":
15421                 spinner.update = spinner.update_quiet
15422                 portage.debug=1
15423                 if "python-trace" in settings.features:
15424                         import portage.debug
15425                         portage.debug.set_trace(True)
15426
15427         if not ("--quiet" in myopts):
15428                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15429                         spinner.update = spinner.update_basic
15430
15431         if "--version" in myopts:
15432                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15433                         settings.profile_path, settings["CHOST"],
15434                         trees[settings["ROOT"]]["vartree"].dbapi)
15435                 return 0
15436         elif "--help" in myopts:
15437                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15438                 return 0
15439
15440         if "--debug" in myopts:
15441                 print "myaction", myaction
15442                 print "myopts", myopts
15443
15444         if not myaction and not myfiles and "--resume" not in myopts:
15445                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15446                 return 1
15447
15448         pretend = "--pretend" in myopts
15449         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15450         buildpkgonly = "--buildpkgonly" in myopts
15451
15452         # check if root user is the current user for the actions where emerge needs this
15453         if portage.secpass < 2:
15454                 # We've already allowed "--version" and "--help" above.
15455                 if "--pretend" not in myopts and myaction not in ("search","info"):
15456                         need_superuser = not \
15457                                 (fetchonly or \
15458                                 (buildpkgonly and secpass >= 1) or \
15459                                 myaction in ("metadata", "regen") or \
15460                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15461                         if portage.secpass < 1 or \
15462                                 need_superuser:
15463                                 if need_superuser:
15464                                         access_desc = "superuser"
15465                                 else:
15466                                         access_desc = "portage group"
15467                                 # Always show portage_group_warning() when only portage group
15468                                 # access is required but the user is not in the portage group.
15469                                 from portage.data import portage_group_warning
15470                                 if "--ask" in myopts:
15471                                         myopts["--pretend"] = True
15472                                         del myopts["--ask"]
15473                                         print ("%s access is required... " + \
15474                                                 "adding --pretend to options.\n") % access_desc
15475                                         if portage.secpass < 1 and not need_superuser:
15476                                                 portage_group_warning()
15477                                 else:
15478                                         sys.stderr.write(("emerge: %s access is " + \
15479                                                 "required.\n\n") % access_desc)
15480                                         if portage.secpass < 1 and not need_superuser:
15481                                                 portage_group_warning()
15482                                         return 1
15483
15484         disable_emergelog = False
15485         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15486                 if x in myopts:
15487                         disable_emergelog = True
15488                         break
15489         if myaction in ("search", "info"):
15490                 disable_emergelog = True
15491         if disable_emergelog:
15492                 """ Disable emergelog for everything except build or unmerge
15493                 operations.  This helps minimize parallel emerge.log entries that can
15494                 confuse log parsers.  We especially want it disabled during
15495                 parallel-fetch, which uses --resume --fetchonly."""
15496                 global emergelog
15497                 def emergelog(*pargs, **kargs):
15498                         pass
15499
15500         if not "--pretend" in myopts:
15501                 emergelog(xterm_titles, "Started emerge on: "+\
15502                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15503                 myelogstr=""
15504                 if myopts:
15505                         myelogstr=" ".join(myopts)
15506                 if myaction:
15507                         myelogstr+=" "+myaction
15508                 if myfiles:
15509                         myelogstr += " " + " ".join(oldargs)
15510                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15511         del oldargs
15512
15513         def emergeexitsig(signum, frame):
15514                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15515                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15516                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15517                 sys.exit(100+signum)
15518         signal.signal(signal.SIGINT, emergeexitsig)
15519         signal.signal(signal.SIGTERM, emergeexitsig)
15520
15521         def emergeexit():
15522                 """This gets out final log message in before we quit."""
15523                 if "--pretend" not in myopts:
15524                         emergelog(xterm_titles, " *** terminating.")
15525                 if "notitles" not in settings.features:
15526                         xtermTitleReset()
15527         portage.atexit_register(emergeexit)
15528
15529         if myaction in ("config", "metadata", "regen", "sync"):
15530                 if "--pretend" in myopts:
15531                         sys.stderr.write(("emerge: The '%s' action does " + \
15532                                 "not support '--pretend'.\n") % myaction)
15533                         return 1
15534
15535         if "sync" == myaction:
15536                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15537         elif "metadata" == myaction:
15538                 action_metadata(settings, portdb, myopts)
15539         elif myaction=="regen":
15540                 validate_ebuild_environment(trees)
15541                 return action_regen(settings, portdb, myopts.get("--jobs"),
15542                         myopts.get("--load-average"))
15543         # HELP action
15544         elif "config"==myaction:
15545                 validate_ebuild_environment(trees)
15546                 action_config(settings, trees, myopts, myfiles)
15547
15548         # SEARCH action
15549         elif "search"==myaction:
15550                 validate_ebuild_environment(trees)
15551                 action_search(trees[settings["ROOT"]]["root_config"],
15552                         myopts, myfiles, spinner)
15553         elif myaction in ("clean", "unmerge") or \
15554                 (myaction == "prune" and "--nodeps" in myopts):
15555                 validate_ebuild_environment(trees)
15556
15557                 # Ensure atoms are valid before calling unmerge().
15558                 # For backward compat, leading '=' is not required.
15559                 for x in myfiles:
15560                         if is_valid_package_atom(x) or \
15561                                 is_valid_package_atom("=" + x):
15562                                 continue
15563                         msg = []
15564                         msg.append("'%s' is not a valid package atom." % (x,))
15565                         msg.append("Please check ebuild(5) for full details.")
15566                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15567                                 level=logging.ERROR, noiselevel=-1)
15568                         return 1
15569
15570                 # When given a list of atoms, unmerge
15571                 # them in the order given.
15572                 ordered = myaction == "unmerge"
15573                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15574                         mtimedb["ldpath"], ordered=ordered):
15575                         if not (buildpkgonly or fetchonly or pretend):
15576                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15577
15578         elif myaction in ("depclean", "info", "prune"):
15579
15580                 # Ensure atoms are valid before calling unmerge().
15581                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15582                 valid_atoms = []
15583                 for x in myfiles:
15584                         if is_valid_package_atom(x):
15585                                 try:
15586                                         valid_atoms.append(
15587                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15588                                 except portage.exception.AmbiguousPackageName, e:
15589                                         msg = "The short ebuild name \"" + x + \
15590                                                 "\" is ambiguous.  Please specify " + \
15591                                                 "one of the following " + \
15592                                                 "fully-qualified ebuild names instead:"
15593                                         for line in textwrap.wrap(msg, 70):
15594                                                 writemsg_level("!!! %s\n" % (line,),
15595                                                         level=logging.ERROR, noiselevel=-1)
15596                                         for i in e[0]:
15597                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15598                                                         level=logging.ERROR, noiselevel=-1)
15599                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15600                                         return 1
15601                                 continue
15602                         msg = []
15603                         msg.append("'%s' is not a valid package atom." % (x,))
15604                         msg.append("Please check ebuild(5) for full details.")
15605                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15606                                 level=logging.ERROR, noiselevel=-1)
15607                         return 1
15608
15609                 if myaction == "info":
15610                         return action_info(settings, trees, myopts, valid_atoms)
15611
15612                 validate_ebuild_environment(trees)
15613                 action_depclean(settings, trees, mtimedb["ldpath"],
15614                         myopts, myaction, valid_atoms, spinner)
15615                 if not (buildpkgonly or fetchonly or pretend):
15616                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15617         # "update", "system", or just process files:
15618         else:
15619                 validate_ebuild_environment(trees)
15620                 if "--pretend" not in myopts:
15621                         display_news_notification(root_config, myopts)
15622                 retval = action_build(settings, trees, mtimedb,
15623                         myopts, myaction, myfiles, spinner)
15624                 root_config = trees[settings["ROOT"]]["root_config"]
15625                 post_emerge(root_config, myopts, mtimedb, retval)
15626
15627                 return retval