1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing output to the tty too frequently wastes CPU time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for user input;
147         the input is checked against the responses and the first match is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
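# Illustrative use of userquery() above (hypothetical call, not from the
# original file): an empty response selects the first entry, and matching is
# a case-insensitive prefix match against the responses.
#   >>> userquery("Continue?", responses=["Yes", "No", "Always"])
#   Continue? [Yes/No/Always] a
#   'Always'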
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",      "--version"
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
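# Illustrative log line produced by emergelog() above (the message text is a
# hypothetical example): the prefix is the integer part of time.time().
#   1216843628: Started emerge on: Jul 23, 2008 12:27:08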
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
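# Illustrative results for format_size() above (examples, not in the original
# module): partial kilobytes are rounded up and thousands are comma-grouped.
#   >>> format_size(1)
#   '1 kB'
#   >>> format_size(2500000)
#   '2,442 kB'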
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
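# Illustrative behavior of getgccversion() above (hypothetical toolchain
# values): if `gcc-config -c` prints "x86_64-pc-linux-gnu-4.1.2" for
# chost "x86_64-pc-linux-gnu", the result is "gcc-4.1.2"; if every probe
# fails, "[unavailable]" is returned after the error message is printed.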
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of whether it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
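# Illustrative parameter sets from create_depgraph_params() above (examples,
# not part of the original source):
#   create_depgraph_params({"--update": True, "--deep": True}, None)
#       -> set(["recurse", "selective", "deep"])
#   create_depgraph_params({}, "remove")
#       -> set(["recurse", "remove", "complete"])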
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virtual matches are unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtual expansion
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
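# Illustrative outcome of create_world_atom() above (hypothetical package
# data): if the argument atom is ">=sys-devel/gcc-4.3" and the matching
# package is slotted with SLOT="4.3", the slot atom "sys-devel/gcc:4.3" is
# recorded; a bare "sys-devel/gcc" argument keeps the unslotted atom, and an
# unslotted non-virtual system package returns None (nothing added to world).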
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
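# Illustrative values for the DepPriority class above (examples, not from the
# original source): every instance compares as integer 0, while __str__
# reports the strongest property that is set.
#   >>> int(DepPriority(buildtime=True)), str(DepPriority(buildtime=True))
#   (0, 'buildtime')
#   >>> str(DepPriority())
#   'soft'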
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
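# Illustrative values for UnmergeDepPriority above (examples, not from the
# original source), matching the table in its docstring:
#   >>> int(UnmergeDepPriority(runtime=True)), str(UnmergeDepPriority(runtime=True))
#   (0, 'hard')
#   >>> int(UnmergeDepPriority(buildtime=True)), str(UnmergeDepPriority(buildtime=True))
#   (-2, 'soft')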
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
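# Illustrative use of the ignore_priority table above (example, not from the
# original source): index 3 is _ignore_runtime, which keeps only buildtime
# edges during graph traversal.
#   >>> f = DepPriorityNormalRange.ignore_priority[3]
#   >>> f(DepPriority(runtime=True)), f(DepPriority(buildtime=True))
#   (True, False)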
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
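# Illustrative use of _find_deep_system_runtime_deps() above (editorial
# sketch, assuming a populated depgraph instance named "mygraph"): the result
# is the set of Package nodes reachable from system-set packages through
# runtime or runtime_post edges only.
#   deep_system_deps = _find_deep_system_runtime_deps(mygraph)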
1158 class FakeVartree(portage.vartree):
1159         """This implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do its
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229         # Initialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
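             # Prefer live ebuild metadata from the porttree for the keys in
             # self._portdb_keys; if no supported ebuild is available, fall
             # back to applying any pending global package moves to the
             # installed entry instead.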
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
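     # The commands returned by grab_global_updates() are the parsed entries
     # from profiles/updates/*, e.g. (purely illustrative)
     # ['move', 'x11-libs/foo', 'x11-libs/bar']; perform_global_updates()
     # below applies them to a package's *DEPEND metadata via
     # portage.update.update_dbentries().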
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. This can raise an InvalidDependString
1383         exception if LICENSE is invalid.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1391                 if not pkgsettings._accept_chost(pkg):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1419                 if not pkgsettings._accept_chost(pkg):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439         if metadata is None:
1440                 mreasons = ["corruption"]
1441         else:
1442                 pkg = Package(type_name=pkg_type, root_config=root_config,
1443                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1444                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1445         return metadata, mreasons
1446
1447 def show_masked_packages(masked_packages):
1448         shown_licenses = set()
1449         shown_comments = set()
1450         # Maybe there is both an ebuild and a binary. Only
1451         # show one of them to avoid redundant appearance.
1452         shown_cpvs = set()
1453         have_eapi_mask = False
1454         for (root_config, pkgsettings, cpv,
1455                 metadata, mreasons) in masked_packages:
1456                 if cpv in shown_cpvs:
1457                         continue
1458                 shown_cpvs.add(cpv)
1459                 comment, filename = None, None
1460                 if "package.mask" in mreasons:
1461                         comment, filename = \
1462                                 portage.getmaskingreason(
1463                                 cpv, metadata=metadata,
1464                                 settings=pkgsettings,
1465                                 portdb=root_config.trees["porttree"].dbapi,
1466                                 return_location=True)
1467                 missing_licenses = []
1468                 if metadata:
1469                         if not portage.eapi_is_supported(metadata["EAPI"]):
1470                                 have_eapi_mask = True
1471                         try:
1472                                 missing_licenses = \
1473                                         pkgsettings._getMissingLicenses(
1474                                                 cpv, metadata)
1475                         except portage.exception.InvalidDependString:
1476                                 # This will have already been reported
1477                                 # above via mreasons.
1478                                 pass
1479
1480                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1481                 if comment and comment not in shown_comments:
1482                         print filename+":"
1483                         print comment
1484                         shown_comments.add(comment)
1485                 portdb = root_config.trees["porttree"].dbapi
1486                 for l in missing_licenses:
1487                         l_path = portdb.findLicensePath(l)
1488                         if l in shown_licenses:
1489                                 continue
1490                         msg = ("A copy of the '%s' license" + \
1491                         " is located at '%s'.") % (l, l_path)
1492                         print msg
1493                         print
1494                         shown_licenses.add(l)
1495         return have_eapi_mask
1496
1497 class Task(SlotObject):
1498         __slots__ = ("_hash_key", "_hash_value")
1499
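             # A Task's identity is defined entirely by the tuple returned from
             # _get_hash_key(): equality, hashing, iteration, len() and "in"
             # all delegate to it, so a Task compares equal to a plain tuple
             # with the same contents.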
1500         def _get_hash_key(self):
1501                 hash_key = getattr(self, "_hash_key", None)
1502                 if hash_key is None:
1503                         raise NotImplementedError(self)
1504                 return hash_key
1505
1506         def __eq__(self, other):
1507                 return self._get_hash_key() == other
1508
1509         def __ne__(self, other):
1510                 return self._get_hash_key() != other
1511
1512         def __hash__(self):
1513                 hash_value = getattr(self, "_hash_value", None)
1514                 if hash_value is None:
1515                         self._hash_value = hash(self._get_hash_key())
1516                 return self._hash_value
1517
1518         def __len__(self):
1519                 return len(self._get_hash_key())
1520
1521         def __getitem__(self, key):
1522                 return self._get_hash_key()[key]
1523
1524         def __iter__(self):
1525                 return iter(self._get_hash_key())
1526
1527         def __contains__(self, key):
1528                 return key in self._get_hash_key()
1529
1530         def __str__(self):
1531                 return str(self._get_hash_key())
1532
1533 class Blocker(Task):
1534
1535         __hash__ = Task.__hash__
1536         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1537
1538         def __init__(self, **kwargs):
1539                 Task.__init__(self, **kwargs)
1540                 self.cp = portage.dep_getkey(self.atom)
1541
1542         def _get_hash_key(self):
1543                 hash_key = getattr(self, "_hash_key", None)
1544                 if hash_key is None:
1545                         self._hash_key = \
1546                                 ("blocks", self.root, self.atom, self.eapi)
1547                 return self._hash_key
1548
1549 class Package(Task):
1550
1551         __hash__ = Task.__hash__
1552         __slots__ = ("built", "cpv", "depth",
1553                 "installed", "metadata", "onlydeps", "operation",
1554                 "root_config", "type_name",
1555                 "category", "counter", "cp", "cpv_split",
1556                 "inherited", "iuse", "mtime",
1557                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1558
1559         metadata_keys = [
1560                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1561                 "INHERITED", "IUSE", "KEYWORDS",
1562                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1563                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1564
1565         def __init__(self, **kwargs):
1566                 Task.__init__(self, **kwargs)
1567                 self.root = self.root_config.root
1568                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1569                 self.cp = portage.cpv_getkey(self.cpv)
1570                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1571                 self.category, self.pf = portage.catsplit(self.cpv)
1572                 self.cpv_split = portage.catpkgsplit(self.cpv)
1573                 self.pv_split = self.cpv_split[1:]
1574
1575         class _use(object):
1576
1577                 __slots__ = ("__weakref__", "enabled")
1578
1579                 def __init__(self, use):
1580                         self.enabled = frozenset(use)
1581
1582         class _iuse(object):
1583
1584                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1585
1586                 def __init__(self, tokens, iuse_implicit):
1587                         self.tokens = tuple(tokens)
1588                         self.iuse_implicit = iuse_implicit
1589                         enabled = []
1590                         disabled = []
1591                         other = []
1592                         for x in tokens:
1593                                 prefix = x[:1]
1594                                 if prefix == "+":
1595                                         enabled.append(x[1:])
1596                                 elif prefix == "-":
1597                                         disabled.append(x[1:])
1598                                 else:
1599                                         other.append(x)
1600                         self.enabled = frozenset(enabled)
1601                         self.disabled = frozenset(disabled)
1602                         self.all = frozenset(chain(enabled, disabled, other))
1603
1604                 def __getattribute__(self, name):
1605                         if name == "regex":
1606                                 try:
1607                                         return object.__getattribute__(self, "regex")
1608                                 except AttributeError:
1609                                         all = object.__getattribute__(self, "all")
1610                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1611                                         # Escape anything except ".*" which is supposed
1612                                         # to pass through from _get_implicit_iuse()
1613                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1614                                         regex = "^(%s)$" % "|".join(regex)
1615                                         regex = regex.replace("\\.\\*", ".*")
1616                                         self.regex = re.compile(regex)
1617                         return object.__getattribute__(self, name)
1618
1619         def _get_hash_key(self):
1620                 hash_key = getattr(self, "_hash_key", None)
1621                 if hash_key is None:
1622                         if self.operation is None:
1623                                 self.operation = "merge"
1624                                 if self.onlydeps or self.installed:
1625                                         self.operation = "nomerge"
1626                         self._hash_key = \
1627                                 (self.type_name, self.root, self.cpv, self.operation)
1628                 return self._hash_key
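                     # Illustrative value: ('ebuild', '/', 'app-misc/foo-1.0', 'merge').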
1629
1630         def __lt__(self, other):
1631                 if other.cp != self.cp:
1632                         return False
1633                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1634                         return True
1635                 return False
1636
1637         def __le__(self, other):
1638                 if other.cp != self.cp:
1639                         return False
1640                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1641                         return True
1642                 return False
1643
1644         def __gt__(self, other):
1645                 if other.cp != self.cp:
1646                         return False
1647                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1648                         return True
1649                 return False
1650
1651         def __ge__(self, other):
1652                 if other.cp != self.cp:
1653                         return False
1654                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1655                         return True
1656                 return False
1657
1658 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1659         if not x.startswith("UNUSED_"))
1660 _all_metadata_keys.discard("CDEPEND")
1661 _all_metadata_keys.update(Package.metadata_keys)
1662
1663 from portage.cache.mappings import slot_dict_class
1664 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1665
1666 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1667         """
1668         Detect metadata updates and synchronize Package attributes.
1669         """
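             # Assigning one of the wrapped keys dispatches to the matching
             # _set_<key>() handler below, e.g. (illustrative)
             # pkg.metadata["USE"] = "foo bar" also refreshes pkg.use.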
1670
1671         __slots__ = ("_pkg",)
1672         _wrapped_keys = frozenset(
1673                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1674
1675         def __init__(self, pkg, metadata):
1676                 _PackageMetadataWrapperBase.__init__(self)
1677                 self._pkg = pkg
1678                 self.update(metadata)
1679
1680         def __setitem__(self, k, v):
1681                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1682                 if k in self._wrapped_keys:
1683                         getattr(self, "_set_" + k.lower())(k, v)
1684
1685         def _set_inherited(self, k, v):
1686                 if isinstance(v, basestring):
1687                         v = frozenset(v.split())
1688                 self._pkg.inherited = v
1689
1690         def _set_iuse(self, k, v):
1691                 self._pkg.iuse = self._pkg._iuse(
1692                         v.split(), self._pkg.root_config.iuse_implicit)
1693
1694         def _set_slot(self, k, v):
1695                 self._pkg.slot = v
1696
1697         def _set_use(self, k, v):
1698                 self._pkg.use = self._pkg._use(v.split())
1699
1700         def _set_counter(self, k, v):
1701                 if isinstance(v, basestring):
1702                         try:
1703                                 v = long(v.strip())
1704                         except ValueError:
1705                                 v = 0
1706                 self._pkg.counter = v
1707
1708         def _set__mtime_(self, k, v):
1709                 if isinstance(v, basestring):
1710                         try:
1711                                 v = long(v.strip())
1712                         except ValueError:
1713                                 v = 0
1714                 self._pkg.mtime = v
1715
1716 class EbuildFetchonly(SlotObject):
1717
1718         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1719
1720         def execute(self):
1721                 settings = self.settings
1722                 pkg = self.pkg
1723                 portdb = pkg.root_config.trees["porttree"].dbapi
1724                 ebuild_path = portdb.findname(pkg.cpv)
1725                 settings.setcpv(pkg)
1726                 debug = settings.get("PORTAGE_DEBUG") == "1"
1727                 use_cache = 1 # always true
1728                 portage.doebuild_environment(ebuild_path, "fetch",
1729                         settings["ROOT"], settings, debug, use_cache, portdb)
1730                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1731
1732                 if restrict_fetch:
1733                         rval = self._execute_with_builddir()
1734                 else:
1735                         rval = portage.doebuild(ebuild_path, "fetch",
1736                                 settings["ROOT"], settings, debug=debug,
1737                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1738                                 mydbapi=portdb, tree="porttree")
1739
1740                         if rval != os.EX_OK:
1741                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1742                                 eerror(msg, phase="unpack", key=pkg.cpv)
1743
1744                 return rval
1745
1746         def _execute_with_builddir(self):
1747                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR for
1748                 # ensuring sane $PWD (bug #239560) and storing elog
1749                 # messages. Use a private temp directory, in order
1750                 # to avoid locking the main one.
1751                 settings = self.settings
1752                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1753                 from tempfile import mkdtemp
1754                 try:
1755                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1756                 except OSError, e:
1757                         if e.errno != portage.exception.PermissionDenied.errno:
1758                                 raise
1759                         raise portage.exception.PermissionDenied(global_tmpdir)
1760                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1761                 settings.backup_changes("PORTAGE_TMPDIR")
1762                 try:
1763                         retval = self._execute()
1764                 finally:
1765                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1766                         settings.backup_changes("PORTAGE_TMPDIR")
1767                         shutil.rmtree(private_tmpdir)
1768                 return retval
1769
1770         def _execute(self):
1771                 settings = self.settings
1772                 pkg = self.pkg
1773                 root_config = pkg.root_config
1774                 portdb = root_config.trees["porttree"].dbapi
1775                 ebuild_path = portdb.findname(pkg.cpv)
1776                 debug = settings.get("PORTAGE_DEBUG") == "1"
1777                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1778
1779                 retval = portage.doebuild(ebuild_path, "fetch",
1780                         self.settings["ROOT"], self.settings, debug=debug,
1781                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1782                         mydbapi=portdb, tree="porttree")
1783
1784                 if retval != os.EX_OK:
1785                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1786                         eerror(msg, phase="unpack", key=pkg.cpv)
1787
1788                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1789                 return retval
1790
1791 class PollConstants(object):
1792
1793         """
1794         Provides POLL* constants that are equivalent to those from the
1795         select module, for use by PollSelectAdapter.
1796         """
1797
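             # Each name is copied from the select module when it exists there;
             # otherwise a unique power-of-two placeholder is assigned so that
             # event bitmask tests still work on platforms without poll().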
1798         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1799         v = 1
1800         for k in names:
1801                 locals()[k] = getattr(select, k, v)
1802                 v *= 2
1803         del k, v
1804
1805 class AsynchronousTask(SlotObject):
1806         """
1807         Subclasses override _wait() and _poll() so that calls
1808         to public methods can be wrapped for implementing
1809         hooks such as exit listener notification.
1810
1811         Subclasses should call self.wait() to notify exit listeners after
1812         the task is complete and self.returncode has been set.
1813         """
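             # Typical life cycle, sketched here for illustration (the task and
             # callback names are assumed):
             #
             #     task.addExitListener(callback)  # callback(task) fires once
             #                                     # returncode is available
             #     task.start()
             #     ...
             #     returncode = task.wait()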
1814
1815         __slots__ = ("background", "cancelled", "returncode") + \
1816                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1817
1818         def start(self):
1819                 """
1820                 Start an asynchronous task and then return as soon as possible.
1821                 """
1822                 self._start()
1823                 self._start_hook()
1824
1825         def _start(self):
1826                 raise NotImplementedError(self)
1827
1828         def isAlive(self):
1829                 return self.returncode is None
1830
1831         def poll(self):
1832                 self._wait_hook()
1833                 return self._poll()
1834
1835         def _poll(self):
1836                 return self.returncode
1837
1838         def wait(self):
1839                 if self.returncode is None:
1840                         self._wait()
1841                 self._wait_hook()
1842                 return self.returncode
1843
1844         def _wait(self):
1845                 return self.returncode
1846
1847         def cancel(self):
1848                 self.cancelled = True
1849                 self.wait()
1850
1851         def addStartListener(self, f):
1852                 """
1853                 The function will be called with one argument, a reference to self.
1854                 """
1855                 if self._start_listeners is None:
1856                         self._start_listeners = []
1857                 self._start_listeners.append(f)
1858
1859         def removeStartListener(self, f):
1860                 if self._start_listeners is None:
1861                         return
1862                 self._start_listeners.remove(f)
1863
1864         def _start_hook(self):
1865                 if self._start_listeners is not None:
1866                         start_listeners = self._start_listeners
1867                         self._start_listeners = None
1868
1869                         for f in start_listeners:
1870                                 f(self)
1871
1872         def addExitListener(self, f):
1873                 """
1874                 The function will be called with one argument, a reference to self.
1875                 """
1876                 if self._exit_listeners is None:
1877                         self._exit_listeners = []
1878                 self._exit_listeners.append(f)
1879
1880         def removeExitListener(self, f):
1881                 if self._exit_listeners is None:
1882                         if self._exit_listener_stack is not None:
1883                                 self._exit_listener_stack.remove(f)
1884                         return
1885                 self._exit_listeners.remove(f)
1886
1887         def _wait_hook(self):
1888                 """
1889                 Call this method after the task completes, just before returning
1890                 the returncode from wait() or poll(). This hook is
1891                 used to trigger exit listeners when the returncode first
1892                 becomes available.
1893                 """
1894                 if self.returncode is not None and \
1895                         self._exit_listeners is not None:
1896
1897                         # This prevents recursion, in case one of the
1898                         # exit handlers triggers this method again by
1899                         # calling wait(). Use a stack that gives
1900                         # removeExitListener() an opportunity to consume
1901                         # listeners from the stack, before they can get
1902                         # called below. This is necessary because a call
1903                         # to one exit listener may result in a call to
1904                         # removeExitListener() for another listener on
1905                         # the stack. That listener needs to be removed
1906                         # from the stack since it would be inconsistent
1907                         # to call it after it has been passed into
1908                         # removeExitListener().
1909                         self._exit_listener_stack = self._exit_listeners
1910                         self._exit_listeners = None
1911
1912                         self._exit_listener_stack.reverse()
1913                         while self._exit_listener_stack:
1914                                 self._exit_listener_stack.pop()(self)
1915
1916 class AbstractPollTask(AsynchronousTask):
1917
1918         __slots__ = ("scheduler",) + \
1919                 ("_registered",)
1920
1921         _bufsize = 4096
1922         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1923         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1924                 _exceptional_events
1925
1926         def _unregister(self):
1927                 raise NotImplementedError(self)
1928
1929         def _unregister_if_appropriate(self, event):
1930                 if self._registered:
1931                         if event & self._exceptional_events:
1932                                 self._unregister()
1933                                 self.cancel()
1934                         elif event & PollConstants.POLLHUP:
1935                                 self._unregister()
1936                                 self.wait()
1937
1938 class PipeReader(AbstractPollTask):
1939
1940         """
1941         Reads output from one or more files and saves it in memory,
1942         for retrieval via the getvalue() method. This is driven by
1943         the scheduler's poll() loop, so it runs entirely within the
1944         current process.
1945         """
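             # Illustrative usage (the file object and scheduler are assumed):
             #
             #     reader = PipeReader(input_files={"pipe": master_file},
             #             scheduler=scheduler)
             #     reader.start()
             #     ...
             #     reader.wait()
             #     output = reader.getvalue()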
1946
1947         __slots__ = ("input_files",) + \
1948                 ("_read_data", "_reg_ids")
1949
1950         def _start(self):
1951                 self._reg_ids = set()
1952                 self._read_data = []
1953                 for k, f in self.input_files.iteritems():
1954                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1955                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1956                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1957                                 self._registered_events, self._output_handler))
1958                 self._registered = True
1959
1960         def isAlive(self):
1961                 return self._registered
1962
1963         def cancel(self):
1964                 if self.returncode is None:
1965                         self.returncode = 1
1966                         self.cancelled = True
1967                 self.wait()
1968
1969         def _wait(self):
1970                 if self.returncode is not None:
1971                         return self.returncode
1972
1973                 if self._registered:
1974                         self.scheduler.schedule(self._reg_ids)
1975                         self._unregister()
1976
1977                 self.returncode = os.EX_OK
1978                 return self.returncode
1979
1980         def getvalue(self):
1981                 """Retrieve the entire contents"""
1982                 return "".join(self._read_data)
1983
1984         def close(self):
1985                 """Free the memory buffer."""
1986                 self._read_data = None
1987
1988         def _output_handler(self, fd, event):
1989
1990                 if event & PollConstants.POLLIN:
1991
1992                         for f in self.input_files.itervalues():
1993                                 if fd == f.fileno():
1994                                         break
1995
1996                         buf = array.array('B')
1997                         try:
1998                                 buf.fromfile(f, self._bufsize)
1999                         except EOFError:
2000                                 pass
2001
2002                         if buf:
2003                                 self._read_data.append(buf.tostring())
2004                         else:
2005                                 self._unregister()
2006                                 self.wait()
2007
2008                 self._unregister_if_appropriate(event)
2009                 return self._registered
2010
2011         def _unregister(self):
2012                 """
2013                 Unregister from the scheduler and close open files.
2014                 """
2015
2016                 self._registered = False
2017
2018                 if self._reg_ids is not None:
2019                         for reg_id in self._reg_ids:
2020                                 self.scheduler.unregister(reg_id)
2021                         self._reg_ids = None
2022
2023                 if self.input_files is not None:
2024                         for f in self.input_files.itervalues():
2025                                 f.close()
2026                         self.input_files = None
2027
2028 class CompositeTask(AsynchronousTask):
2029
2030         __slots__ = ("scheduler",) + ("_current_task",)
2031
2032         def isAlive(self):
2033                 return self._current_task is not None
2034
2035         def cancel(self):
2036                 self.cancelled = True
2037                 if self._current_task is not None:
2038                         self._current_task.cancel()
2039
2040         def _poll(self):
2041                 """
2042                 This does a loop calling self._current_task.poll()
2043                 repeatedly as long as the value of self._current_task
2044                 keeps changing. It calls poll() a maximum of one time
2045                 for a given self._current_task instance. This is useful
2046                 since calling poll() on a task can trigger an advance to
2047                 the next task, which could eventually lead to the returncode
2048                 being set in cases where polling only a single task would
2049                 not have the same effect.
2050                 """
2051
2052                 prev = None
2053                 while True:
2054                         task = self._current_task
2055                         if task is None or task is prev:
2056                                 # don't poll the same task more than once
2057                                 break
2058                         task.poll()
2059                         prev = task
2060
2061                 return self.returncode
2062
2063         def _wait(self):
2064
2065                 prev = None
2066                 while True:
2067                         task = self._current_task
2068                         if task is None:
2069                                 # don't wait for the same task more than once
2070                                 break
2071                         if task is prev:
2072                                 # Before the task.wait() method returned, an exit
2073                                 # listener should have set self._current_task to either
2074                                 # a different task or None. Something is wrong.
2075                                 raise AssertionError("self._current_task has not " + \
2076                                         "changed since calling wait", self, task)
2077                         task.wait()
2078                         prev = task
2079
2080                 return self.returncode
2081
2082         def _assert_current(self, task):
2083                 """
2084                 Raises an AssertionError if the given task is not the
2085                 same one as self._current_task. This can be useful
2086                 for detecting bugs.
2087                 """
2088                 if task is not self._current_task:
2089                         raise AssertionError("Unrecognized task: %s" % (task,))
2090
2091         def _default_exit(self, task):
2092                 """
2093                 Calls _assert_current() on the given task and then sets the
2094                 composite returncode attribute if task.returncode != os.EX_OK.
2095                 If the task failed then self._current_task will be set to None.
2096                 Subclasses can use this as a generic task exit callback.
2097
2098                 @rtype: int
2099                 @returns: The task.returncode attribute.
2100                 """
2101                 self._assert_current(task)
2102                 if task.returncode != os.EX_OK:
2103                         self.returncode = task.returncode
2104                         self._current_task = None
2105                 return task.returncode
2106
2107         def _final_exit(self, task):
2108                 """
2109                 Assumes that task is the final task of this composite task.
2110                 Calls _default_exit() and sets self.returncode to the task's
2111                 returncode and sets self._current_task to None.
2112                 """
2113                 self._default_exit(task)
2114                 self._current_task = None
2115                 self.returncode = task.returncode
2116                 return self.returncode
2117
2118         def _default_final_exit(self, task):
2119                 """
2120                 This calls _final_exit() and then wait().
2121
2122                 Subclasses can use this as a generic final task exit callback.
2123
2124                 """
2125                 self._final_exit(task)
2126                 return self.wait()
2127
2128         def _start_task(self, task, exit_handler):
2129                 """
2130                 Register exit handler for the given task, set it
2131                 as self._current_task, and call task.start().
2132
2133                 Subclasses can use this as a generic way to start
2134                 a task.
2135
2136                 """
2137                 task.addExitListener(exit_handler)
2138                 self._current_task = task
2139                 task.start()
2140
2141 class TaskSequence(CompositeTask):
2142         """
2143         A collection of tasks that executes sequentially. Each task
2144         must have an addExitListener() method that can be used as
2145         a means to trigger movement from one task to the next.
2146         """
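             # Illustrative usage (the member tasks are assumed):
             #
             #     seq = TaskSequence(scheduler=scheduler)
             #     seq.add(first_task)
             #     seq.add(second_task)
             #     seq.start()  # second_task runs only if first_task succeeds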
2147
2148         __slots__ = ("_task_queue",)
2149
2150         def __init__(self, **kwargs):
2151                 AsynchronousTask.__init__(self, **kwargs)
2152                 self._task_queue = deque()
2153
2154         def add(self, task):
2155                 self._task_queue.append(task)
2156
2157         def _start(self):
2158                 self._start_next_task()
2159
2160         def cancel(self):
2161                 self._task_queue.clear()
2162                 CompositeTask.cancel(self)
2163
2164         def _start_next_task(self):
2165                 self._start_task(self._task_queue.popleft(),
2166                         self._task_exit_handler)
2167
2168         def _task_exit_handler(self, task):
2169                 if self._default_exit(task) != os.EX_OK:
2170                         self.wait()
2171                 elif self._task_queue:
2172                         self._start_next_task()
2173                 else:
2174                         self._final_exit(task)
2175                         self.wait()
2176
2177 class SubProcess(AbstractPollTask):
2178
2179         __slots__ = ("pid",) + \
2180                 ("_files", "_reg_id")
2181
2182         # A file descriptor is required for the scheduler to monitor changes from
2183         # inside a poll() loop. When logging is not enabled, create a pipe just to
2184         # serve this purpose alone.
2185         _dummy_pipe_fd = 9
2186
2187         def _poll(self):
2188                 if self.returncode is not None:
2189                         return self.returncode
2190                 if self.pid is None:
2191                         return self.returncode
2192                 if self._registered:
2193                         return self.returncode
2194
2195                 try:
2196                         retval = os.waitpid(self.pid, os.WNOHANG)
2197                 except OSError, e:
2198                         if e.errno != errno.ECHILD:
2199                                 raise
2200                         del e
2201                         retval = (self.pid, 1)
2202
2203                 if retval == (0, 0):
2204                         return None
2205                 self._set_returncode(retval)
2206                 return self.returncode
2207
2208         def cancel(self):
2209                 if self.isAlive():
2210                         try:
2211                                 os.kill(self.pid, signal.SIGTERM)
2212                         except OSError, e:
2213                                 if e.errno != errno.ESRCH:
2214                                         raise
2215                                 del e
2216
2217                 self.cancelled = True
2218                 if self.pid is not None:
2219                         self.wait()
2220                 return self.returncode
2221
2222         def isAlive(self):
2223                 return self.pid is not None and \
2224                         self.returncode is None
2225
2226         def _wait(self):
2227
2228                 if self.returncode is not None:
2229                         return self.returncode
2230
2231                 if self._registered:
2232                         self.scheduler.schedule(self._reg_id)
2233                         self._unregister()
2234                         if self.returncode is not None:
2235                                 return self.returncode
2236
2237                 try:
2238                         wait_retval = os.waitpid(self.pid, 0)
2239                 except OSError, e:
2240                         if e.errno != errno.ECHILD:
2241                                 raise
2242                         del e
2243                         self._set_returncode((self.pid, 1))
2244                 else:
2245                         self._set_returncode(wait_retval)
2246
2247                 return self.returncode
2248
2249         def _unregister(self):
2250                 """
2251                 Unregister from the scheduler and close open files.
2252                 """
2253
2254                 self._registered = False
2255
2256                 if self._reg_id is not None:
2257                         self.scheduler.unregister(self._reg_id)
2258                         self._reg_id = None
2259
2260                 if self._files is not None:
2261                         for f in self._files.itervalues():
2262                                 f.close()
2263                         self._files = None
2264
2265         def _set_returncode(self, wait_retval):
2266
2267                 retval = wait_retval[1]
2268
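                     # os.waitpid() packs the exit status into the high byte and
                     # the terminating signal (if any) into the low byte; normalize
                     # the status to a single nonzero return code either way.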
2269                 if retval != os.EX_OK:
2270                         if retval & 0xff:
2271                                 retval = (retval & 0xff) << 8
2272                         else:
2273                                 retval = retval >> 8
2274
2275                 self.returncode = retval
2276
2277 class SpawnProcess(SubProcess):
2278
2279         """
2280         Constructor keyword args are passed into portage.process.spawn().
2281         The required "args" keyword argument will be passed as the first
2282         spawn() argument.
2283         """
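             # Illustrative usage (the argument values are assumed):
             #
             #     proc = SpawnProcess(args=["/bin/true"], scheduler=scheduler,
             #             logfile=settings.get("PORTAGE_LOG_FILE"))
             #     proc.start()
             #     proc.wait()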
2284
2285         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2286                 "uid", "gid", "groups", "umask", "logfile",
2287                 "path_lookup", "pre_exec")
2288
2289         __slots__ = ("args",) + \
2290                 _spawn_kwarg_names
2291
2292         _file_names = ("log", "process", "stdout")
2293         _files_dict = slot_dict_class(_file_names, prefix="")
2294
2295         def _start(self):
2296
2297                 if self.cancelled:
2298                         return
2299
2300                 if self.fd_pipes is None:
2301                         self.fd_pipes = {}
2302                 fd_pipes = self.fd_pipes
2303                 fd_pipes.setdefault(0, sys.stdin.fileno())
2304                 fd_pipes.setdefault(1, sys.stdout.fileno())
2305                 fd_pipes.setdefault(2, sys.stderr.fileno())
2306
2307                 # flush any pending output
2308                 for fd in fd_pipes.itervalues():
2309                         if fd == sys.stdout.fileno():
2310                                 sys.stdout.flush()
2311                         if fd == sys.stderr.fileno():
2312                                 sys.stderr.flush()
2313
2314                 logfile = self.logfile
2315                 self._files = self._files_dict()
2316                 files = self._files
2317
2318                 master_fd, slave_fd = self._pipe(fd_pipes)
2319                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2320                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2321
2322                 null_input = None
2323                 fd_pipes_orig = fd_pipes.copy()
2324                 if self.background:
2325                         # TODO: Use job control functions like tcsetpgrp() to control
2326                         # access to stdin. Until then, use /dev/null so that any
2327                         # attempts to read from stdin will immediately return EOF
2328                         # instead of blocking indefinitely.
2329                         null_input = open('/dev/null', 'rb')
2330                         fd_pipes[0] = null_input.fileno()
2331                 else:
2332                         fd_pipes[0] = fd_pipes_orig[0]
2333
2334                 files.process = os.fdopen(master_fd, 'rb')
2335                 if logfile is not None:
2336
2337                         fd_pipes[1] = slave_fd
2338                         fd_pipes[2] = slave_fd
2339
2340                         files.log = open(logfile, mode='ab')
2341                         portage.util.apply_secpass_permissions(logfile,
2342                                 uid=portage.portage_uid, gid=portage.portage_gid,
2343                                 mode=0660)
2344
2345                         if not self.background:
2346                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2347
2348                         output_handler = self._output_handler
2349
2350                 else:
2351
2352                         # Create a dummy pipe so the scheduler can monitor
2353                         # the process from inside a poll() loop.
2354                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2355                         if self.background:
2356                                 fd_pipes[1] = slave_fd
2357                                 fd_pipes[2] = slave_fd
2358                         output_handler = self._dummy_handler
2359
2360                 kwargs = {}
2361                 for k in self._spawn_kwarg_names:
2362                         v = getattr(self, k)
2363                         if v is not None:
2364                                 kwargs[k] = v
2365
2366                 kwargs["fd_pipes"] = fd_pipes
2367                 kwargs["returnpid"] = True
2368                 kwargs.pop("logfile", None)
2369
2370                 self._reg_id = self.scheduler.register(files.process.fileno(),
2371                         self._registered_events, output_handler)
2372                 self._registered = True
2373
2374                 retval = self._spawn(self.args, **kwargs)
2375
2376                 os.close(slave_fd)
2377                 if null_input is not None:
2378                         null_input.close()
2379
2380                 if isinstance(retval, int):
2381                         # spawn failed
2382                         self._unregister()
2383                         self.returncode = retval
2384                         self.wait()
2385                         return
2386
2387                 self.pid = retval[0]
2388                 portage.process.spawned_pids.remove(self.pid)
2389
2390         def _pipe(self, fd_pipes):
2391                 """
2392                 @type fd_pipes: dict
2393                 @param fd_pipes: pipes from which to copy terminal size if desired.
2394                 """
2395                 return os.pipe()
2396
2397         def _spawn(self, args, **kwargs):
2398                 return portage.process.spawn(args, **kwargs)
2399
2400         def _output_handler(self, fd, event):
2401
2402                 if event & PollConstants.POLLIN:
2403
2404                         files = self._files
2405                         buf = array.array('B')
2406                         try:
2407                                 buf.fromfile(files.process, self._bufsize)
2408                         except EOFError:
2409                                 pass
2410
2411                         if buf:
2412                                 if not self.background:
2413                                         buf.tofile(files.stdout)
2414                                         files.stdout.flush()
2415                                 buf.tofile(files.log)
2416                                 files.log.flush()
2417                         else:
2418                                 self._unregister()
2419                                 self.wait()
2420
2421                 self._unregister_if_appropriate(event)
2422                 return self._registered
2423
2424         def _dummy_handler(self, fd, event):
2425                 """
2426                 This method is mainly interested in detecting EOF, since
2427                 the only purpose of the pipe is to allow the scheduler to
2428                 monitor the process from inside a poll() loop.
2429                 """
2430
2431                 if event & PollConstants.POLLIN:
2432
2433                         buf = array.array('B')
2434                         try:
2435                                 buf.fromfile(self._files.process, self._bufsize)
2436                         except EOFError:
2437                                 pass
2438
2439                         if buf:
2440                                 pass
2441                         else:
2442                                 self._unregister()
2443                                 self.wait()
2444
2445                 self._unregister_if_appropriate(event)
2446                 return self._registered
2447
2448 class MiscFunctionsProcess(SpawnProcess):
2449         """
2450         Spawns misc-functions.sh with an existing ebuild environment.
2451         """
2452
2453         __slots__ = ("commands", "phase", "pkg", "settings")
2454
2455         def _start(self):
2456                 settings = self.settings
2457                 settings.pop("EBUILD_PHASE", None)
2458                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2459                 misc_sh_binary = os.path.join(portage_bin_path,
2460                         os.path.basename(portage.const.MISC_SH_BINARY))
2461
2462                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2463                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2464
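                     # Clear any stale exit status file first so that the status
                     # check in _set_returncode() reflects only this run.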
2465                 portage._doebuild_exit_status_unlink(
2466                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2467
2468                 SpawnProcess._start(self)
2469
2470         def _spawn(self, args, **kwargs):
2471                 settings = self.settings
2472                 debug = settings.get("PORTAGE_DEBUG") == "1"
2473                 return portage.spawn(" ".join(args), settings,
2474                         debug=debug, **kwargs)
2475
2476         def _set_returncode(self, wait_retval):
2477                 SpawnProcess._set_returncode(self, wait_retval)
2478                 self.returncode = portage._doebuild_exit_status_check_and_log(
2479                         self.settings, self.phase, self.returncode)
2480
2481 class EbuildFetcher(SpawnProcess):
2482
2483         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2484                 ("_build_dir",)
2485
2486         def _start(self):
2487
2488                 root_config = self.pkg.root_config
2489                 portdb = root_config.trees["porttree"].dbapi
2490                 ebuild_path = portdb.findname(self.pkg.cpv)
2491                 settings = self.config_pool.allocate()
2492                 settings.setcpv(self.pkg)
2493
2494                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2495                 # should not be touched since otherwise it could interfere with
2496                 # another instance of the same cpv concurrently being built for a
2497                 # different $ROOT (currently, builds only cooperate with prefetchers
2498                 # that are spawned for the same $ROOT).
2499                 if not self.prefetch:
2500                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2501                         self._build_dir.lock()
2502                         self._build_dir.clean()
2503                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2504                         if self.logfile is None:
2505                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2506
2507                 phase = "fetch"
2508                 if self.fetchall:
2509                         phase = "fetchall"
2510
2511                 # If any incremental variables have been overridden
2512                 # via the environment, those values need to be passed
2513                 # along here so that they are correctly considered by
2514                 # the config instance in the subprocess.
2515                 fetch_env = os.environ.copy()
2516
2517                 nocolor = settings.get("NOCOLOR")
2518                 if nocolor is not None:
2519                         fetch_env["NOCOLOR"] = nocolor
2520
2521                 fetch_env["PORTAGE_NICENESS"] = "0"
2522                 if self.prefetch:
2523                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2524
2525                 ebuild_binary = os.path.join(
2526                         settings["PORTAGE_BIN_PATH"], "ebuild")
2527
2528                 fetch_args = [ebuild_binary, ebuild_path, phase]
2529                 debug = settings.get("PORTAGE_DEBUG") == "1"
2530                 if debug:
2531                         fetch_args.append("--debug")
2532
2533                 self.args = fetch_args
2534                 self.env = fetch_env
2535                 SpawnProcess._start(self)
2536
2537         def _pipe(self, fd_pipes):
2538                 """When appropriate, use a pty so that fetcher progress bars,
2539                 such as the one wget displays, will work properly."""
2540                 if self.background or not sys.stdout.isatty():
2541                         # When the output only goes to a log file,
2542                         # there's no point in creating a pty.
2543                         return os.pipe()
2544                 stdout_pipe = fd_pipes.get(1)
2545                 got_pty, master_fd, slave_fd = \
2546                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2547                 return (master_fd, slave_fd)
2548
2549         def _set_returncode(self, wait_retval):
2550                 SpawnProcess._set_returncode(self, wait_retval)
2551                 # Collect elog messages that might have been
2552                 # created by the pkg_nofetch phase.
2553                 if self._build_dir is not None:
2554                         # Skip elog messages for prefetch, in order to avoid duplicates.
2555                         if not self.prefetch and self.returncode != os.EX_OK:
2556                                 elog_out = None
2557                                 if self.logfile is not None:
2558                                         if self.background:
2559                                                 elog_out = open(self.logfile, 'a')
2560                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2561                                 if self.logfile is not None:
2562                                         msg += ", Log file:"
2563                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2564                                 if self.logfile is not None:
2565                                         eerror(" '%s'" % (self.logfile,),
2566                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2567                                 if elog_out is not None:
2568                                         elog_out.close()
2569                         if not self.prefetch:
2570                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2571                         features = self._build_dir.settings.features
2572                         if self.returncode == os.EX_OK:
2573                                 self._build_dir.clean()
2574                         self._build_dir.unlock()
2575                         self.config_pool.deallocate(self._build_dir.settings)
2576                         self._build_dir = None
2577
2578 class EbuildBuildDir(SlotObject):
2579
2580         __slots__ = ("dir_path", "pkg", "settings",
2581                 "locked", "_catdir", "_lock_obj")
2582
2583         def __init__(self, **kwargs):
2584                 SlotObject.__init__(self, **kwargs)
2585                 self.locked = False
2586
2587         def lock(self):
2588                 """
2589                 This raises an AlreadyLocked exception if lock() is called
2590                 while a lock is already held. In order to avoid this, call
2591                 unlock() or check whether the "locked" attribute is True
2592                 or False before calling lock().
2593                 """
2594                 if self._lock_obj is not None:
2595                         raise self.AlreadyLocked((self._lock_obj,))
2596
2597                 dir_path = self.dir_path
2598                 if dir_path is None:
2599                         root_config = self.pkg.root_config
2600                         portdb = root_config.trees["porttree"].dbapi
2601                         ebuild_path = portdb.findname(self.pkg.cpv)
2602                         settings = self.settings
2603                         settings.setcpv(self.pkg)
2604                         debug = settings.get("PORTAGE_DEBUG") == "1"
2605                         use_cache = 1 # always true
2606                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2607                                 self.settings, debug, use_cache, portdb)
2608                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2609
2610                 catdir = os.path.dirname(dir_path)
2611                 self._catdir = catdir
2612
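                     # Hold a lock on the category directory while it is created,
                     # so that a concurrent unlock() in another process cannot
                     # rmdir() it before the builddir lock below is acquired.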
2613                 portage.util.ensure_dirs(os.path.dirname(catdir),
2614                         gid=portage.portage_gid,
2615                         mode=070, mask=0)
2616                 catdir_lock = None
2617                 try:
2618                         catdir_lock = portage.locks.lockdir(catdir)
2619                         portage.util.ensure_dirs(catdir,
2620                                 gid=portage.portage_gid,
2621                                 mode=070, mask=0)
2622                         self._lock_obj = portage.locks.lockdir(dir_path)
2623                 finally:
2624                         self.locked = self._lock_obj is not None
2625                         if catdir_lock is not None:
2626                                 portage.locks.unlockdir(catdir_lock)
2627
2628         def clean(self):
2629                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2630                 by keepwork or keeptemp in FEATURES."""
2631                 settings = self.settings
2632                 features = settings.features
2633                 if not ("keepwork" in features or "keeptemp" in features):
2634                         try:
2635                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2636                         except EnvironmentError, e:
2637                                 if e.errno != errno.ENOENT:
2638                                         raise
2639                                 del e
2640
2641         def unlock(self):
2642                 if self._lock_obj is None:
2643                         return
2644
2645                 portage.locks.unlockdir(self._lock_obj)
2646                 self._lock_obj = None
2647                 self.locked = False
2648
2649                 catdir = self._catdir
2650                 catdir_lock = None
2651                 try:
2652                         catdir_lock = portage.locks.lockdir(catdir)
2653                 finally:
2654                         if catdir_lock:
2655                                 try:
2656                                         os.rmdir(catdir)
2657                                 except OSError, e:
2658                                         if e.errno not in (errno.ENOENT,
2659                                                 errno.ENOTEMPTY, errno.EEXIST):
2660                                                 raise
2661                                         del e
2662                                 portage.locks.unlockdir(catdir_lock)
2663
2664         class AlreadyLocked(portage.exception.PortageException):
2665                 pass
2666
2667 class EbuildBuild(CompositeTask):
2668
2669         __slots__ = ("args_set", "config_pool", "find_blockers",
2670                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2671                 "prefetcher", "settings", "world_atom") + \
2672                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2673
2674         def _start(self):
2675
2676                 logger = self.logger
2677                 opts = self.opts
2678                 pkg = self.pkg
2679                 settings = self.settings
2680                 world_atom = self.world_atom
2681                 root_config = pkg.root_config
2682                 tree = "porttree"
2683                 self._tree = tree
2684                 portdb = root_config.trees[tree].dbapi
2685                 settings.setcpv(pkg)
2686                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2687                 ebuild_path = portdb.findname(self.pkg.cpv)
2688                 self._ebuild_path = ebuild_path
2689
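                     # If a prefetcher is still downloading this package's files,
                     # wait for it to exit rather than starting a competing fetch;
                     # a prefetcher that is no longer alive is simply cancelled.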
2690                 prefetcher = self.prefetcher
2691                 if prefetcher is None:
2692                         pass
2693                 elif not prefetcher.isAlive():
2694                         prefetcher.cancel()
2695                 elif prefetcher.poll() is None:
2696
2697                         waiting_msg = "Fetching files " + \
2698                                 "in the background. " + \
2699                                 "To view fetch progress, run `tail -f " + \
2700                                 "/var/log/emerge-fetch.log` in another " + \
2701                                 "terminal."
2702                         msg_prefix = colorize("GOOD", " * ")
2703                         from textwrap import wrap
2704                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2705                                 for line in wrap(waiting_msg, 65))
2706                         if not self.background:
2707                                 writemsg(waiting_msg, noiselevel=-1)
2708
2709                         self._current_task = prefetcher
2710                         prefetcher.addExitListener(self._prefetch_exit)
2711                         return
2712
2713                 self._prefetch_exit(prefetcher)
2714
2715         def _prefetch_exit(self, prefetcher):
2716
2717                 opts = self.opts
2718                 pkg = self.pkg
2719                 settings = self.settings
2720
2721                 if opts.fetchonly:
2722                         fetcher = EbuildFetchonly(
2723                                 fetch_all=opts.fetch_all_uri,
2724                                 pkg=pkg, pretend=opts.pretend,
2725                                 settings=settings)
2726                         retval = fetcher.execute()
2727                         self.returncode = retval
2728                         self.wait()
2729                         return
2730
2731                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2732                         fetchall=opts.fetch_all_uri,
2733                         fetchonly=opts.fetchonly,
2734                         background=self.background,
2735                         pkg=pkg, scheduler=self.scheduler)
2736
2737                 self._start_task(fetcher, self._fetch_exit)
2738
2739         def _fetch_exit(self, fetcher):
2740                 opts = self.opts
2741                 pkg = self.pkg
2742
2743                 fetch_failed = False
2744                 if opts.fetchonly:
2745                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2746                 else:
2747                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2748
2749                 if fetch_failed and fetcher.logfile is not None and \
2750                         os.path.exists(fetcher.logfile):
2751                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2752
2753                 if not fetch_failed and fetcher.logfile is not None:
2754                         # Fetch was successful, so remove the fetch log.
2755                         try:
2756                                 os.unlink(fetcher.logfile)
2757                         except OSError:
2758                                 pass
2759
2760                 if fetch_failed or opts.fetchonly:
2761                         self.wait()
2762                         return
2763
2764                 logger = self.logger
2765                 opts = self.opts
2766                 pkg_count = self.pkg_count
2767                 scheduler = self.scheduler
2768                 settings = self.settings
2769                 features = settings.features
2770                 ebuild_path = self._ebuild_path
2771                 system_set = pkg.root_config.sets["system"]
2772
2773                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2774                 self._build_dir.lock()
2775
2776                 # Cleaning is triggered before the setup
2777                 # phase, in portage.doebuild().
2778                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2779                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2780                 short_msg = "emerge: (%s of %s) %s Clean" % \
2781                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2782                 logger.log(msg, short_msg=short_msg)
2783
2784                 #buildsyspkg: Check if we need to _force_ binary package creation
2785                 self._issyspkg = "buildsyspkg" in features and \
2786                                 system_set.findAtomForPackage(pkg) and \
2787                                 not opts.buildpkg
2788
2789                 if opts.buildpkg or self._issyspkg:
2790
2791                         self._buildpkg = True
2792
2793                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2794                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2795                         short_msg = "emerge: (%s of %s) %s Compile" % \
2796                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2797                         logger.log(msg, short_msg=short_msg)
2798
2799                 else:
2800                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2801                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2802                         short_msg = "emerge: (%s of %s) %s Compile" % \
2803                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2804                         logger.log(msg, short_msg=short_msg)
2805
2806                 build = EbuildExecuter(background=self.background, pkg=pkg,
2807                         scheduler=scheduler, settings=settings)
2808                 self._start_task(build, self._build_exit)
2809
2810         def _unlock_builddir(self):
2811                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2812                 self._build_dir.unlock()
2813
2814         def _build_exit(self, build):
2815                 if self._default_exit(build) != os.EX_OK:
2816                         self._unlock_builddir()
2817                         self.wait()
2818                         return
2819
2820                 opts = self.opts
2821                 buildpkg = self._buildpkg
2822
2823                 if not buildpkg:
2824                         self._final_exit(build)
2825                         self.wait()
2826                         return
2827
2828                 if self._issyspkg:
2829                         msg = ">>> This is a system package, " + \
2830                                 "let's pack a rescue tarball.\n"
2831
2832                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2833                         if log_path is not None:
2834                                 log_file = open(log_path, 'a')
2835                                 try:
2836                                         log_file.write(msg)
2837                                 finally:
2838                                         log_file.close()
2839
2840                         if not self.background:
2841                                 portage.writemsg_stdout(msg, noiselevel=-1)
2842
2843                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2844                         scheduler=self.scheduler, settings=self.settings)
2845
2846                 self._start_task(packager, self._buildpkg_exit)
2847
2848         def _buildpkg_exit(self, packager):
2849                 """
2850                 Release the build dir lock when there is a failure or
2851                 when in buildpkgonly mode. Otherwise, the lock will
2852                 be released when install() is called.
2853                 """
2854
2855                 if self._default_exit(packager) != os.EX_OK:
2856                         self._unlock_builddir()
2857                         self.wait()
2858                         return
2859
2860                 if self.opts.buildpkgonly:
2861                         # Need to call "clean" phase for buildpkgonly mode
2862                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2863                         phase = "clean"
2864                         clean_phase = EbuildPhase(background=self.background,
2865                                 pkg=self.pkg, phase=phase,
2866                                 scheduler=self.scheduler, settings=self.settings,
2867                                 tree=self._tree)
2868                         self._start_task(clean_phase, self._clean_exit)
2869                         return
2870
2871                 # Continue holding the builddir lock until
2872                 # after the package has been installed.
2873                 self._current_task = None
2874                 self.returncode = packager.returncode
2875                 self.wait()
2876
2877         def _clean_exit(self, clean_phase):
2878                 if self._final_exit(clean_phase) != os.EX_OK or \
2879                         self.opts.buildpkgonly:
2880                         self._unlock_builddir()
2881                 self.wait()
2882
2883         def install(self):
2884                 """
2885                 Install the package and then clean up and release locks.
2886                 Only call this after the build has completed successfully
2887                 and neither fetchonly nor buildpkgonly mode are enabled.
2888                 """
2889
2890                 find_blockers = self.find_blockers
2891                 ldpath_mtimes = self.ldpath_mtimes
2892                 logger = self.logger
2893                 pkg = self.pkg
2894                 pkg_count = self.pkg_count
2895                 settings = self.settings
2896                 world_atom = self.world_atom
2897                 ebuild_path = self._ebuild_path
2898                 tree = self._tree
2899
2900                 merge = EbuildMerge(find_blockers=self.find_blockers,
2901                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2902                         pkg_count=pkg_count, pkg_path=ebuild_path,
2903                         scheduler=self.scheduler,
2904                         settings=settings, tree=tree, world_atom=world_atom)
2905
2906                 msg = " === (%s of %s) Merging (%s::%s)" % \
2907                         (pkg_count.curval, pkg_count.maxval,
2908                         pkg.cpv, ebuild_path)
2909                 short_msg = "emerge: (%s of %s) %s Merge" % \
2910                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2911                 logger.log(msg, short_msg=short_msg)
2912
2913                 try:
2914                         rval = merge.execute()
2915                 finally:
2916                         self._unlock_builddir()
2917
2918                 return rval
2919
2920 class EbuildExecuter(CompositeTask):
2921
2922         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2923
2924         _phases = ("prepare", "configure", "compile", "test", "install")
2925
2926         _live_eclasses = frozenset([
2927                 "bzr",
2928                 "cvs",
2929                 "darcs",
2930                 "git",
2931                 "mercurial",
2932                 "subversion"
2933         ])
2934
2935         def _start(self):
2936                 self._tree = "porttree"
2937                 pkg = self.pkg
2938                 phase = "clean"
2939                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2940                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2941                 self._start_task(clean_phase, self._clean_phase_exit)
2942
2943         def _clean_phase_exit(self, clean_phase):
2944
2945                 if self._default_exit(clean_phase) != os.EX_OK:
2946                         self.wait()
2947                         return
2948
2949                 pkg = self.pkg
2950                 scheduler = self.scheduler
2951                 settings = self.settings
2952                 cleanup = 1
2953
2954                 # This initializes PORTAGE_LOG_FILE.
2955                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2956
2957                 setup_phase = EbuildPhase(background=self.background,
2958                         pkg=pkg, phase="setup", scheduler=scheduler,
2959                         settings=settings, tree=self._tree)
2960
2961                 setup_phase.addExitListener(self._setup_exit)
2962                 self._current_task = setup_phase
2963                 self.scheduler.scheduleSetup(setup_phase)
2964
2965         def _setup_exit(self, setup_phase):
2966
2967                 if self._default_exit(setup_phase) != os.EX_OK:
2968                         self.wait()
2969                         return
2970
2971                 unpack_phase = EbuildPhase(background=self.background,
2972                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2973                         settings=self.settings, tree=self._tree)
2974
2975                 if self._live_eclasses.intersection(self.pkg.inherited):
2976                         # Serialize $DISTDIR access for live ebuilds since
2977                         # otherwise they can interfere with each other.
2978
2979                         unpack_phase.addExitListener(self._unpack_exit)
2980                         self._current_task = unpack_phase
2981                         self.scheduler.scheduleUnpack(unpack_phase)
2982
2983                 else:
2984                         self._start_task(unpack_phase, self._unpack_exit)
2985
2986         def _unpack_exit(self, unpack_phase):
2987
2988                 if self._default_exit(unpack_phase) != os.EX_OK:
2989                         self.wait()
2990                         return
2991
2992                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2993
2994                 pkg = self.pkg
2995                 phases = self._phases
2996                 eapi = pkg.metadata["EAPI"]
2997                 if eapi in ("0", "1"):
2998                         # skip src_prepare and src_configure
2999                         phases = phases[2:]
3000
3001                 for phase in phases:
3002                         ebuild_phases.add(EbuildPhase(background=self.background,
3003                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3004                                 settings=self.settings, tree=self._tree))
3005
3006                 self._start_task(ebuild_phases, self._default_final_exit)
3007
3008 class EbuildMetadataPhase(SubProcess):
3009
3010         """
3011         Asynchronous interface for the ebuild "depend" phase which is
3012         used to extract metadata from the ebuild.
3013         """
3014
3015         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3016                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3017                 ("_raw_metadata",)
3018
3019         _file_names = ("ebuild",)
3020         _files_dict = slot_dict_class(_file_names, prefix="")
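             # The "depend" phase writes one line per metadata key (in
             # portage.auxdbkeys order) to this file descriptor, which _start()
             # maps to the write end of a pipe monitored by _output_handler().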
3021         _metadata_fd = 9
3022
3023         def _start(self):
3024                 settings = self.settings
3025                 settings.reset()
3026                 ebuild_path = self.ebuild_path
3027                 debug = settings.get("PORTAGE_DEBUG") == "1"
3028                 master_fd = None
3029                 slave_fd = None
3030                 fd_pipes = None
3031                 if self.fd_pipes is not None:
3032                         fd_pipes = self.fd_pipes.copy()
3033                 else:
3034                         fd_pipes = {}
3035
3036                 fd_pipes.setdefault(0, sys.stdin.fileno())
3037                 fd_pipes.setdefault(1, sys.stdout.fileno())
3038                 fd_pipes.setdefault(2, sys.stderr.fileno())
3039
3040                 # flush any pending output
3041                 for fd in fd_pipes.itervalues():
3042                         if fd == sys.stdout.fileno():
3043                                 sys.stdout.flush()
3044                         if fd == sys.stderr.fileno():
3045                                 sys.stderr.flush()
3046
3047                 fd_pipes_orig = fd_pipes.copy()
3048                 self._files = self._files_dict()
3049                 files = self._files
3050
3051                 master_fd, slave_fd = os.pipe()
3052                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3053                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3054
3055                 fd_pipes[self._metadata_fd] = slave_fd
3056
3057                 self._raw_metadata = []
3058                 files.ebuild = os.fdopen(master_fd, 'r')
3059                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3060                         self._registered_events, self._output_handler)
3061                 self._registered = True
3062
3063                 retval = portage.doebuild(ebuild_path, "depend",
3064                         settings["ROOT"], settings, debug,
3065                         mydbapi=self.portdb, tree="porttree",
3066                         fd_pipes=fd_pipes, returnpid=True)
3067
3068                 os.close(slave_fd)
3069
3070                 if isinstance(retval, int):
3071                         # doebuild failed before spawning
3072                         self._unregister()
3073                         self.returncode = retval
3074                         self.wait()
3075                         return
3076
3077                 self.pid = retval[0]
3078                 portage.process.spawned_pids.remove(self.pid)
3079
3080         def _output_handler(self, fd, event):
3081
3082                 if event & PollConstants.POLLIN:
3083                         self._raw_metadata.append(self._files.ebuild.read())
3084                         if not self._raw_metadata[-1]:
3085                                 self._unregister()
3086                                 self.wait()
3087
3088                 self._unregister_if_appropriate(event)
3089                 return self._registered
3090
3091         def _set_returncode(self, wait_retval):
3092                 SubProcess._set_returncode(self, wait_retval)
3093                 if self.returncode == os.EX_OK:
3094                         metadata_lines = "".join(self._raw_metadata).splitlines()
3095                         if len(portage.auxdbkeys) != len(metadata_lines):
3096                                 # Don't trust bash's returncode if the
3097                                 # number of lines is incorrect.
3098                                 self.returncode = 1
3099                         else:
3100                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3101                                 self.metadata_callback(self.cpv, self.ebuild_path,
3102                                         self.repo_path, metadata, self.ebuild_mtime)
3103
3104 class EbuildProcess(SpawnProcess):
3105
3106         __slots__ = ("phase", "pkg", "settings", "tree")
3107
3108         def _start(self):
3109                 # Don't open the log file during the clean phase since the
3110                 # open file can result in an NFS lock on $T/build.log which
3111                 # prevents the clean phase from removing $T.
3112                 if self.phase not in ("clean", "cleanrm"):
3113                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3114                 SpawnProcess._start(self)
3115
3116         def _pipe(self, fd_pipes):
3117                 stdout_pipe = fd_pipes.get(1)
3118                 got_pty, master_fd, slave_fd = \
3119                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3120                 return (master_fd, slave_fd)
3121
3122         def _spawn(self, args, **kwargs):
3123
3124                 root_config = self.pkg.root_config
3125                 tree = self.tree
3126                 mydbapi = root_config.trees[tree].dbapi
3127                 settings = self.settings
3128                 ebuild_path = settings["EBUILD"]
3129                 debug = settings.get("PORTAGE_DEBUG") == "1"
3130
3131                 rval = portage.doebuild(ebuild_path, self.phase,
3132                         root_config.root, settings, debug,
3133                         mydbapi=mydbapi, tree=tree, **kwargs)
3134
3135                 return rval
3136
3137         def _set_returncode(self, wait_retval):
3138                 SpawnProcess._set_returncode(self, wait_retval)
3139
3140                 if self.phase not in ("clean", "cleanrm"):
3141                         self.returncode = portage._doebuild_exit_status_check_and_log(
3142                                 self.settings, self.phase, self.returncode)
3143
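                     # FEATURES=test-fail-continue turns a src_test failure into a
                     # success so that the remaining phases can proceed.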
3144                 if self.phase == "test" and self.returncode != os.EX_OK and \
3145                         "test-fail-continue" in self.settings.features:
3146                         self.returncode = os.EX_OK
3147
3148                 portage._post_phase_userpriv_perms(self.settings)
3149
3150 class EbuildPhase(CompositeTask):
3151
3152         __slots__ = ("background", "pkg", "phase",
3153                 "scheduler", "settings", "tree")
3154
3155         _post_phase_cmds = portage._post_phase_cmds
3156
3157         def _start(self):
3158
3159                 ebuild_process = EbuildProcess(background=self.background,
3160                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3161                         settings=self.settings, tree=self.tree)
3162
3163                 self._start_task(ebuild_process, self._ebuild_exit)
3164
3165         def _ebuild_exit(self, ebuild_process):
3166
3167                 if self.phase == "install":
3168                         out = None
3169                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3170                         log_file = None
3171                         if self.background and log_path is not None:
3172                                 log_file = open(log_path, 'a')
3173                                 out = log_file
3174                         try:
3175                                 portage._check_build_log(self.settings, out=out)
3176                         finally:
3177                                 if log_file is not None:
3178                                         log_file.close()
3179
3180                 if self._default_exit(ebuild_process) != os.EX_OK:
3181                         self.wait()
3182                         return
3183
3184                 settings = self.settings
3185
3186                 if self.phase == "install":
3187                         portage._post_src_install_uid_fix(settings)
3188
3189                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3190                 if post_phase_cmds is not None:
3191                         post_phase = MiscFunctionsProcess(background=self.background,
3192                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3193                                 scheduler=self.scheduler, settings=settings)
3194                         self._start_task(post_phase, self._post_phase_exit)
3195                         return
3196
3197                 self.returncode = ebuild_process.returncode
3198                 self._current_task = None
3199                 self.wait()
3200
3201         def _post_phase_exit(self, post_phase):
3202                 if self._final_exit(post_phase) != os.EX_OK:
3203                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3204                                 noiselevel=-1)
3205                 self._current_task = None
3206                 self.wait()
3207                 return
3208
3209 class EbuildBinpkg(EbuildProcess):
3210         """
3211         This assumes that src_install() has successfully completed.
3212         """
3213         __slots__ = ("_binpkg_tmpfile",)
3214
3215         def _start(self):
3216                 self.phase = "package"
3217                 self.tree = "porttree"
3218                 pkg = self.pkg
3219                 root_config = pkg.root_config
3220                 portdb = root_config.trees["porttree"].dbapi
3221                 bintree = root_config.trees["bintree"]
3222                 ebuild_path = portdb.findname(self.pkg.cpv)
3223                 settings = self.settings
3224                 debug = settings.get("PORTAGE_DEBUG") == "1"
3225
3226                 bintree.prevent_collision(pkg.cpv)
3227                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3228                         pkg.cpv + ".tbz2." + str(os.getpid()))
3229                 self._binpkg_tmpfile = binpkg_tmpfile
3230                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3231                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3232
3233                 try:
3234                         EbuildProcess._start(self)
3235                 finally:
3236                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3237
3238         def _set_returncode(self, wait_retval):
3239                 EbuildProcess._set_returncode(self, wait_retval)
3240
3241                 pkg = self.pkg
3242                 bintree = pkg.root_config.trees["bintree"]
3243                 binpkg_tmpfile = self._binpkg_tmpfile
3244                 if self.returncode == os.EX_OK:
3245                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3246
3247 class EbuildMerge(SlotObject):
3248
3249         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3250                 "pkg", "pkg_count", "pkg_path", "pretend",
3251                 "scheduler", "settings", "tree", "world_atom")
3252
3253         def execute(self):
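                     # Merge the image in ${D} into the filesystem rooted at
                     # root_config.root, using the metadata recorded in the
                     # package's build-info directory.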
3254                 root_config = self.pkg.root_config
3255                 settings = self.settings
3256                 retval = portage.merge(settings["CATEGORY"],
3257                         settings["PF"], settings["D"],
3258                         os.path.join(settings["PORTAGE_BUILDDIR"],
3259                         "build-info"), root_config.root, settings,
3260                         myebuild=settings["EBUILD"],
3261                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3262                         vartree=root_config.trees["vartree"],
3263                         prev_mtimes=self.ldpath_mtimes,
3264                         scheduler=self.scheduler,
3265                         blockers=self.find_blockers)
3266
3267                 if retval == os.EX_OK:
3268                         self.world_atom(self.pkg)
3269                         self._log_success()
3270
3271                 return retval
3272
3273         def _log_success(self):
3274                 pkg = self.pkg
3275                 pkg_count = self.pkg_count
3276                 pkg_path = self.pkg_path
3277                 logger = self.logger
3278                 if "noclean" not in self.settings.features:
3279                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3280                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3281                         logger.log((" === (%s of %s) " + \
3282                                 "Post-Build Cleaning (%s::%s)") % \
3283                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3284                                 short_msg=short_msg)
3285                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3286                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3287
3288 class PackageUninstall(AsynchronousTask):
3289
3290         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3291
3292         def _start(self):
3293                 try:
3294                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3295                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3296                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3297                                 writemsg_level=self._writemsg_level)
3298                 except UninstallFailure, e:
3299                         self.returncode = e.status
3300                 else:
3301                         self.returncode = os.EX_OK
3302                 self.wait()
3303
3304         def _writemsg_level(self, msg, level=0, noiselevel=0):
3305
3306                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3307                 background = self.background
3308
3309                 if log_path is None:
3310                         if not (background and level < logging.WARNING):
3311                                 portage.util.writemsg_level(msg,
3312                                         level=level, noiselevel=noiselevel)
3313                 else:
3314                         if not background:
3315                                 portage.util.writemsg_level(msg,
3316                                         level=level, noiselevel=noiselevel)
3317
3318                         f = open(log_path, 'a')
3319                         try:
3320                                 f.write(msg)
3321                         finally:
3322                                 f.close()
3323
3324 class Binpkg(CompositeTask):
3325
3326         __slots__ = ("find_blockers",
3327                 "ldpath_mtimes", "logger", "opts",
3328                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3329                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3330                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3331
3332         def _writemsg_level(self, msg, level=0, noiselevel=0):
3333
3334                 if not self.background:
3335                         portage.util.writemsg_level(msg,
3336                                 level=level, noiselevel=noiselevel)
3337
3338                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339                 if log_path is not None:
3340                         f = open(log_path, 'a')
3341                         try:
3342                                 f.write(msg)
3343                         finally:
3344                                 f.close()
3345
3346         def _start(self):
3347
3348                 pkg = self.pkg
3349                 settings = self.settings
3350                 settings.setcpv(pkg)
3351                 self._tree = "bintree"
3352                 self._bintree = self.pkg.root_config.trees[self._tree]
3353                 self._verify = not self.opts.pretend
3354
3355                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3356                         "portage", pkg.category, pkg.pf)
3357                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3358                         pkg=pkg, settings=settings)
3359                 self._image_dir = os.path.join(dir_path, "image")
3360                 self._infloc = os.path.join(dir_path, "build-info")
3361                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3362                 settings["EBUILD"] = self._ebuild_path
3363                 debug = settings.get("PORTAGE_DEBUG") == "1"
3364                 portage.doebuild_environment(self._ebuild_path, "setup",
3365                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3366                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3367
3368                 # The prefetcher has already completed or it
3369                 # could be running now. If it's running now,
3370                 # wait for it to complete since it holds
3371                 # a lock on the file being fetched. The
3372                 # portage.locks functions are only designed
3373                 # to work between separate processes. Since
3374                 # the lock is held by the current process,
3375                 # use the scheduler and fetcher methods to
3376                 # synchronize with the fetcher.
3377                 prefetcher = self.prefetcher
3378                 if prefetcher is None:
3379                         pass
3380                 elif not prefetcher.isAlive():
3381                         prefetcher.cancel()
3382                 elif prefetcher.poll() is None:
3383
3384                         waiting_msg = ("Fetching '%s' " + \
3385                                 "in the background. " + \
3386                                 "To view fetch progress, run `tail -f " + \
3387                                 "/var/log/emerge-fetch.log` in another " + \
3388                                 "terminal.") % prefetcher.pkg_path
3389                         msg_prefix = colorize("GOOD", " * ")
3390                         from textwrap import wrap
3391                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3392                                 for line in wrap(waiting_msg, 65))
3393                         if not self.background:
3394                                 writemsg(waiting_msg, noiselevel=-1)
3395
3396                         self._current_task = prefetcher
3397                         prefetcher.addExitListener(self._prefetch_exit)
3398                         return
3399
3400                 self._prefetch_exit(prefetcher)
3401
3402         def _prefetch_exit(self, prefetcher):
3403
3404                 pkg = self.pkg
3405                 pkg_count = self.pkg_count
3406                 if not (self.opts.pretend or self.opts.fetchonly):
3407                         self._build_dir.lock()
3408                         try:
3409                                 shutil.rmtree(self._build_dir.dir_path)
3410                         except EnvironmentError, e:
3411                                 if e.errno != errno.ENOENT:
3412                                         raise
3413                                 del e
3414                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3415                 fetcher = BinpkgFetcher(background=self.background,
3416                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3417                         pretend=self.opts.pretend, scheduler=self.scheduler)
3418                 pkg_path = fetcher.pkg_path
3419                 self._pkg_path = pkg_path
3420
3421                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3422
3423                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3424                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3425                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3426                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3427                         self.logger.log(msg, short_msg=short_msg)
3428                         self._start_task(fetcher, self._fetcher_exit)
3429                         return
3430
3431                 self._fetcher_exit(fetcher)
3432
3433         def _fetcher_exit(self, fetcher):
3434
3435                 # The fetcher only has a returncode when
3436                 # --getbinpkg is enabled.
3437                 if fetcher.returncode is not None:
3438                         self._fetched_pkg = True
3439                         if self._default_exit(fetcher) != os.EX_OK:
3440                                 self._unlock_builddir()
3441                                 self.wait()
3442                                 return
3443
3444                 if self.opts.pretend:
3445                         self._current_task = None
3446                         self.returncode = os.EX_OK
3447                         self.wait()
3448                         return
3449
3450                 verifier = None
3451                 if self._verify:
3452                         logfile = None
3453                         if self.background:
3454                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3455                         verifier = BinpkgVerifier(background=self.background,
3456                                 logfile=logfile, pkg=self.pkg)
3457                         self._start_task(verifier, self._verifier_exit)
3458                         return
3459
3460                 self._verifier_exit(verifier)
3461
3462         def _verifier_exit(self, verifier):
3463                 if verifier is not None and \
3464                         self._default_exit(verifier) != os.EX_OK:
3465                         self._unlock_builddir()
3466                         self.wait()
3467                         return
3468
3469                 logger = self.logger
3470                 pkg = self.pkg
3471                 pkg_count = self.pkg_count
3472                 pkg_path = self._pkg_path
3473
3474                 if self._fetched_pkg:
3475                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3476
3477                 if self.opts.fetchonly:
3478                         self._current_task = None
3479                         self.returncode = os.EX_OK
3480                         self.wait()
3481                         return
3482
3483                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3484                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3485                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3486                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3487                 logger.log(msg, short_msg=short_msg)
3488
3489                 phase = "clean"
3490                 settings = self.settings
3491                 ebuild_phase = EbuildPhase(background=self.background,
3492                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3493                         settings=settings, tree=self._tree)
3494
3495                 self._start_task(ebuild_phase, self._clean_exit)
3496
3497         def _clean_exit(self, clean_phase):
3498                 if self._default_exit(clean_phase) != os.EX_OK:
3499                         self._unlock_builddir()
3500                         self.wait()
3501                         return
3502
3503                 dir_path = self._build_dir.dir_path
3504
3505                 try:
3506                         shutil.rmtree(dir_path)
3507                 except (IOError, OSError), e:
3508                         if e.errno != errno.ENOENT:
3509                                 raise
3510                         del e
3511
3512                 infloc = self._infloc
3513                 pkg = self.pkg
3514                 pkg_path = self._pkg_path
3515
3516                 dir_mode = 0755
3517                 for mydir in (dir_path, self._image_dir, infloc):
3518                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3519                                 gid=portage.data.portage_gid, mode=dir_mode)
3520
3521                 # This initializes PORTAGE_LOG_FILE.
3522                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3523                 self._writemsg_level(">>> Extracting info\n")
3524
3525                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3526                 check_missing_metadata = ("CATEGORY", "PF")
3527                 missing_metadata = set()
3528                 for k in check_missing_metadata:
3529                         v = pkg_xpak.getfile(k)
3530                         if not v:
3531                                 missing_metadata.add(k)
3532
3533                 pkg_xpak.unpackinfo(infloc)
3534                 for k in missing_metadata:
3535                         if k == "CATEGORY":
3536                                 v = pkg.category
3537                         elif k == "PF":
3538                                 v = pkg.pf
3539                         else:
3540                                 continue
3541
3542                         f = open(os.path.join(infloc, k), 'wb')
3543                         try:
3544                                 f.write(v + "\n")
3545                         finally:
3546                                 f.close()
3547
3548                 # Store the md5sum in the vdb.
3549                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3550                 try:
3551                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3552                 finally:
3553                         f.close()
3554
3555                 # This gives bashrc users an opportunity to do various things
3556                 # such as remove binary packages after they're installed.
3557                 settings = self.settings
3558                 settings.setcpv(self.pkg)
3559                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3560                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3561
3562                 phase = "setup"
3563                 setup_phase = EbuildPhase(background=self.background,
3564                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3565                         settings=settings, tree=self._tree)
3566
3567                 setup_phase.addExitListener(self._setup_exit)
3568                 self._current_task = setup_phase
3569                 self.scheduler.scheduleSetup(setup_phase)
3570
3571         def _setup_exit(self, setup_phase):
3572                 if self._default_exit(setup_phase) != os.EX_OK:
3573                         self._unlock_builddir()
3574                         self.wait()
3575                         return
3576
3577                 extractor = BinpkgExtractorAsync(background=self.background,
3578                         image_dir=self._image_dir,
3579                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3580                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3581                 self._start_task(extractor, self._extractor_exit)
3582
3583         def _extractor_exit(self, extractor):
3584                 if self._final_exit(extractor) != os.EX_OK:
3585                         self._unlock_builddir()
3586                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3587                                 noiselevel=-1)
3588                 self.wait()
3589
3590         def _unlock_builddir(self):
3591                 if self.opts.pretend or self.opts.fetchonly:
3592                         return
3593                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3594                 self._build_dir.unlock()
3595
3596         def install(self):
3597
3598                 # This gives bashrc users an opportunity to do various things
3599                 # such as remove binary packages after they're installed.
3600                 settings = self.settings
3601                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3602                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3603
3604                 merge = EbuildMerge(find_blockers=self.find_blockers,
3605                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3606                         pkg=self.pkg, pkg_count=self.pkg_count,
3607                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3608                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3609
3610                 try:
3611                         retval = merge.execute()
3612                 finally:
3613                         settings.pop("PORTAGE_BINPKG_FILE", None)
3614                         self._unlock_builddir()
3615                 return retval
3616
3617 class BinpkgFetcher(SpawnProcess):
3618
3619         __slots__ = ("pkg", "pretend",
3620                 "locked", "pkg_path", "_lock_obj")
3621
3622         def __init__(self, **kwargs):
3623                 SpawnProcess.__init__(self, **kwargs)
3624                 pkg = self.pkg
3625                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3626
3627         def _start(self):
3628
3629                 if self.cancelled:
3630                         return
3631
3632                 pkg = self.pkg
3633                 pretend = self.pretend
3634                 bintree = pkg.root_config.trees["bintree"]
3635                 settings = bintree.settings
3636                 use_locks = "distlocks" in settings.features
3637                 pkg_path = self.pkg_path
3638
3639                 if not pretend:
3640                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3641                         if use_locks:
3642                                 self.lock()
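                     # A file that already exists but is listed in bintree.invalids
                     # is presumably a partial download, so resume it rather than
                     # deleting and refetching it.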
3643                 exists = os.path.exists(pkg_path)
3644                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3645                 if not (pretend or resume):
3646                         # Remove existing file or broken symlink.
3647                         try:
3648                                 os.unlink(pkg_path)
3649                         except OSError:
3650                                 pass
3651
3652                 # urljoin doesn't work correctly with
3653                 # unrecognized protocols like sftp
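                # Sketch of the failure mode (exact behavior depends on the
                # Python version's urlparse.uses_relative list): for a scheme
                # that urlparse does not treat as relative-capable,
                #   urlparse.urljoin("sftp://host/packages/", "Packages")
                # may drop the base URI entirely, so the path is joined by hand.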
3654                 if bintree._remote_has_index:
3655                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3656                         if not rel_uri:
3657                                 rel_uri = pkg.cpv + ".tbz2"
3658                         uri = bintree._remote_base_uri.rstrip("/") + \
3659                                 "/" + rel_uri.lstrip("/")
3660                 else:
3661                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3662                                 "/" + pkg.pf + ".tbz2"
3663
3664                 if pretend:
3665                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3666                         self.returncode = os.EX_OK
3667                         self.wait()
3668                         return
3669
3670                 protocol = urlparse.urlparse(uri)[0]
3671                 fcmd_prefix = "FETCHCOMMAND"
3672                 if resume:
3673                         fcmd_prefix = "RESUMECOMMAND"
3674                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3675                 if not fcmd:
3676                         fcmd = settings.get(fcmd_prefix)
3677
3678                 fcmd_vars = {
3679                         "DISTDIR" : os.path.dirname(pkg_path),
3680                         "URI"     : uri,
3681                         "FILE"    : os.path.basename(pkg_path)
3682                 }
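                # For illustration, a FETCHCOMMAND along the lines of
                #   wget -O "${DISTDIR}/${FILE}" "${URI}"
                # (the exact default varies by configuration) is expanded by
                # varexpand() below using the DISTDIR/URI/FILE values above.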
3683
3684                 fetch_env = dict(settings.iteritems())
3685                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3686                         for x in shlex.split(fcmd)]
3687
3688                 if self.fd_pipes is None:
3689                         self.fd_pipes = {}
3690                 fd_pipes = self.fd_pipes
3691
3692                 # Redirect all output to stdout since some fetchers like
3693                 # wget pollute stderr (if portage detects a problem then it
3694                 # can send its own message to stderr).
3695                 fd_pipes.setdefault(0, sys.stdin.fileno())
3696                 fd_pipes.setdefault(1, sys.stdout.fileno())
3697                 fd_pipes.setdefault(2, sys.stdout.fileno())
3698
3699                 self.args = fetch_args
3700                 self.env = fetch_env
3701                 SpawnProcess._start(self)
3702
3703         def _set_returncode(self, wait_retval):
3704                 SpawnProcess._set_returncode(self, wait_retval)
3705                 if self.returncode == os.EX_OK:
3706                         # If possible, update the mtime to match the remote package if
3707                         # the fetcher didn't already do it automatically.
3708                         bintree = self.pkg.root_config.trees["bintree"]
3709                         if bintree._remote_has_index:
3710                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3711                                 if remote_mtime is not None:
3712                                         try:
3713                                                 remote_mtime = long(remote_mtime)
3714                                         except ValueError:
3715                                                 pass
3716                                         else:
3717                                                 try:
3718                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3719                                                 except OSError:
3720                                                         pass
3721                                                 else:
3722                                                         if remote_mtime != local_mtime:
3723                                                                 try:
3724                                                                         os.utime(self.pkg_path,
3725                                                                                 (remote_mtime, remote_mtime))
3726                                                                 except OSError:
3727                                                                         pass
3728
3729                 if self.locked:
3730                         self.unlock()
3731
3732         def lock(self):
3733                 """
3734                 This raises an AlreadyLocked exception if lock() is called
3735                 while a lock is already held. In order to avoid this, call
3736                 unlock() or check whether the "locked" attribute is True
3737                 or False before calling lock().
3738                 """
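                # Illustrative call pattern for a caller managing the lock
                # manually (`fetcher` is a hypothetical instance):
                #   if not fetcher.locked:
                #       fetcher.lock()
                #   try:
                #       ...
                #   finally:
                #       fetcher.unlock()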
3739                 if self._lock_obj is not None:
3740                         raise self.AlreadyLocked((self._lock_obj,))
3741
3742                 self._lock_obj = portage.locks.lockfile(
3743                         self.pkg_path, wantnewlockfile=1)
3744                 self.locked = True
3745
3746         class AlreadyLocked(portage.exception.PortageException):
3747                 pass
3748
3749         def unlock(self):
3750                 if self._lock_obj is None:
3751                         return
3752                 portage.locks.unlockfile(self._lock_obj)
3753                 self._lock_obj = None
3754                 self.locked = False
3755
3756 class BinpkgVerifier(AsynchronousTask):
3757         __slots__ = ("logfile", "pkg",)
3758
3759         def _start(self):
3760                 """
3761                 Note: Unlike a normal AsynchronousTask.start() method,
3762                 this one does all of its work synchronously. The returncode
3763                 attribute will be set before it returns.
3764                 """
3765
3766                 pkg = self.pkg
3767                 root_config = pkg.root_config
3768                 bintree = root_config.trees["bintree"]
3769                 rval = os.EX_OK
3770                 stdout_orig = sys.stdout
3771                 stderr_orig = sys.stderr
3772                 log_file = None
3773                 if self.background and self.logfile is not None:
3774                         log_file = open(self.logfile, 'a')
3775                 try:
3776                         if log_file is not None:
3777                                 sys.stdout = log_file
3778                                 sys.stderr = log_file
3779                         try:
3780                                 bintree.digestCheck(pkg)
3781                         except portage.exception.FileNotFound:
3782                                 writemsg("!!! Fetching Binary failed " + \
3783                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3784                                 rval = 1
3785                         except portage.exception.DigestException, e:
3786                                 writemsg("\n!!! Digest verification failed:\n",
3787                                         noiselevel=-1)
3788                                 writemsg("!!! %s\n" % e.value[0],
3789                                         noiselevel=-1)
3790                                 writemsg("!!! Reason: %s\n" % e.value[1],
3791                                         noiselevel=-1)
3792                                 writemsg("!!! Got: %s\n" % e.value[2],
3793                                         noiselevel=-1)
3794                                 writemsg("!!! Expected: %s\n" % e.value[3],
3795                                         noiselevel=-1)
3796                                 rval = 1
3797                         if rval != os.EX_OK:
3798                                 pkg_path = bintree.getname(pkg.cpv)
3799                                 head, tail = os.path.split(pkg_path)
3800                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3801                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3802                                         noiselevel=-1)
3803                 finally:
3804                         sys.stdout = stdout_orig
3805                         sys.stderr = stderr_orig
3806                         if log_file is not None:
3807                                 log_file.close()
3808
3809                 self.returncode = rval
3810                 self.wait()
3811
3812 class BinpkgPrefetcher(CompositeTask):
3813
3814         __slots__ = ("pkg",) + \
3815                 ("pkg_path", "_bintree",)
3816
3817         def _start(self):
3818                 self._bintree = self.pkg.root_config.trees["bintree"]
3819                 fetcher = BinpkgFetcher(background=self.background,
3820                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3821                         scheduler=self.scheduler)
3822                 self.pkg_path = fetcher.pkg_path
3823                 self._start_task(fetcher, self._fetcher_exit)
3824
3825         def _fetcher_exit(self, fetcher):
3826
3827                 if self._default_exit(fetcher) != os.EX_OK:
3828                         self.wait()
3829                         return
3830
3831                 verifier = BinpkgVerifier(background=self.background,
3832                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3833                 self._start_task(verifier, self._verifier_exit)
3834
3835         def _verifier_exit(self, verifier):
3836                 if self._default_exit(verifier) != os.EX_OK:
3837                         self.wait()
3838                         return
3839
3840                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3841
3842                 self._current_task = None
3843                 self.returncode = os.EX_OK
3844                 self.wait()
3845
3846 class BinpkgExtractorAsync(SpawnProcess):
3847
3848         __slots__ = ("image_dir", "pkg", "pkg_path")
3849
3850         _shell_binary = portage.const.BASH_BINARY
3851
3852         def _start(self):
3853                 self.args = [self._shell_binary, "-c",
3854                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3855                         (portage._shell_quote(self.pkg_path),
3856                         portage._shell_quote(self.image_dir))]
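                # The constructed command is equivalent to running, e.g.:
                #   bash -c 'bzip2 -dqc -- /path/to/pkg.tbz2 | tar -xp -C <image_dir> -f -'
                # i.e. decompress the binary package and unpack it, preserving
                # permissions, into the temporary image directory.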
3857
3858                 self.env = self.pkg.root_config.settings.environ()
3859                 SpawnProcess._start(self)
3860
3861 class MergeListItem(CompositeTask):
3862
3863         """
3864         TODO: For parallel scheduling, everything here needs asynchronous
3865         execution support (start, poll, and wait methods).
3866         """
3867
3868         __slots__ = ("args_set",
3869                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3870                 "find_blockers", "logger", "mtimedb", "pkg",
3871                 "pkg_count", "pkg_to_replace", "prefetcher",
3872                 "settings", "statusMessage", "world_atom") + \
3873                 ("_install_task",)
3874
3875         def _start(self):
3876
3877                 pkg = self.pkg
3878                 build_opts = self.build_opts
3879
3880                 if pkg.installed:
3881                         # uninstall, executed by self.merge()
3882                         self.returncode = os.EX_OK
3883                         self.wait()
3884                         return
3885
3886                 args_set = self.args_set
3887                 find_blockers = self.find_blockers
3888                 logger = self.logger
3889                 mtimedb = self.mtimedb
3890                 pkg_count = self.pkg_count
3891                 scheduler = self.scheduler
3892                 settings = self.settings
3893                 world_atom = self.world_atom
3894                 ldpath_mtimes = mtimedb["ldpath"]
3895
3896                 action_desc = "Emerging"
3897                 preposition = "for"
3898                 if pkg.type_name == "binary":
3899                         action_desc += " binary"
3900
3901                 if build_opts.fetchonly:
3902                         action_desc = "Fetching"
3903
3904                 msg = "%s (%s of %s) %s" % \
3905                         (action_desc,
3906                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3907                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3908                         colorize("GOOD", pkg.cpv))
3909
3910                 portdb = pkg.root_config.trees["porttree"].dbapi
3911                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3912                 if portdir_repo_name:
3913                         pkg_repo_name = pkg.metadata.get("repository")
3914                         if pkg_repo_name != portdir_repo_name:
3915                                 if not pkg_repo_name:
3916                                         pkg_repo_name = "unknown repo"
3917                                 msg += " from %s" % pkg_repo_name
3918
3919                 if pkg.root != "/":
3920                         msg += " %s %s" % (preposition, pkg.root)
3921
3922                 if not build_opts.pretend:
3923                         self.statusMessage(msg)
3924                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3925                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3926
3927                 if pkg.type_name == "ebuild":
3928
3929                         build = EbuildBuild(args_set=args_set,
3930                                 background=self.background,
3931                                 config_pool=self.config_pool,
3932                                 find_blockers=find_blockers,
3933                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3934                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3935                                 prefetcher=self.prefetcher, scheduler=scheduler,
3936                                 settings=settings, world_atom=world_atom)
3937
3938                         self._install_task = build
3939                         self._start_task(build, self._default_final_exit)
3940                         return
3941
3942                 elif pkg.type_name == "binary":
3943
3944                         binpkg = Binpkg(background=self.background,
3945                                 find_blockers=find_blockers,
3946                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3947                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3948                                 prefetcher=self.prefetcher, settings=settings,
3949                                 scheduler=scheduler, world_atom=world_atom)
3950
3951                         self._install_task = binpkg
3952                         self._start_task(binpkg, self._default_final_exit)
3953                         return
3954
3955         def _poll(self):
3956                 self._install_task.poll()
3957                 return self.returncode
3958
3959         def _wait(self):
3960                 self._install_task.wait()
3961                 return self.returncode
3962
3963         def merge(self):
3964
3965                 pkg = self.pkg
3966                 build_opts = self.build_opts
3967                 find_blockers = self.find_blockers
3968                 logger = self.logger
3969                 mtimedb = self.mtimedb
3970                 pkg_count = self.pkg_count
3971                 prefetcher = self.prefetcher
3972                 scheduler = self.scheduler
3973                 settings = self.settings
3974                 world_atom = self.world_atom
3975                 ldpath_mtimes = mtimedb["ldpath"]
3976
3977                 if pkg.installed:
3978                         if not (build_opts.buildpkgonly or \
3979                                 build_opts.fetchonly or build_opts.pretend):
3980
3981                                 uninstall = PackageUninstall(background=self.background,
3982                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3983                                         pkg=pkg, scheduler=scheduler, settings=settings)
3984
3985                                 uninstall.start()
3986                                 retval = uninstall.wait()
3987                                 if retval != os.EX_OK:
3988                                         return retval
3989                         return os.EX_OK
3990
3991                 if build_opts.fetchonly or \
3992                         build_opts.buildpkgonly:
3993                         return self.returncode
3994
3995                 retval = self._install_task.install()
3996                 return retval
3997
3998 class PackageMerge(AsynchronousTask):
3999         """
4000         TODO: Implement asynchronous merge so that the scheduler can
4001         run while a merge is executing.
4002         """
4003
4004         __slots__ = ("merge",)
4005
4006         def _start(self):
4007
4008                 pkg = self.merge.pkg
4009                 pkg_count = self.merge.pkg_count
4010
4011                 if pkg.installed:
4012                         action_desc = "Uninstalling"
4013                         preposition = "from"
4014                 else:
4015                         action_desc = "Installing"
4016                         preposition = "to"
4017
4018                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4019
4020                 if pkg.root != "/":
4021                         msg += " %s %s" % (preposition, pkg.root)
4022
4023                 if not self.merge.build_opts.fetchonly and \
4024                         not self.merge.build_opts.pretend and \
4025                         not self.merge.build_opts.buildpkgonly:
4026                         self.merge.statusMessage(msg)
4027
4028                 self.returncode = self.merge.merge()
4029                 self.wait()
4030
4031 class DependencyArg(object):
4032         def __init__(self, arg=None, root_config=None):
4033                 self.arg = arg
4034                 self.root_config = root_config
4035
4036         def __str__(self):
4037                 return str(self.arg)
4038
4039 class AtomArg(DependencyArg):
4040         def __init__(self, atom=None, **kwargs):
4041                 DependencyArg.__init__(self, **kwargs)
4042                 self.atom = atom
4043                 if not isinstance(self.atom, portage.dep.Atom):
4044                         self.atom = portage.dep.Atom(self.atom)
4045                 self.set = (self.atom, )
4046
4047 class PackageArg(DependencyArg):
4048         def __init__(self, package=None, **kwargs):
4049                 DependencyArg.__init__(self, **kwargs)
4050                 self.package = package
4051                 self.atom = portage.dep.Atom("=" + package.cpv)
4052                 self.set = (self.atom, )
4053
4054 class SetArg(DependencyArg):
4055         def __init__(self, set=None, **kwargs):
4056                 DependencyArg.__init__(self, **kwargs)
4057                 self.set = set
4058                 self.name = self.arg[len(SETPREFIX):]
4059
4060 class Dependency(SlotObject):
4061         __slots__ = ("atom", "blocker", "depth",
4062                 "parent", "onlydeps", "priority", "root")
4063         def __init__(self, **kwargs):
4064                 SlotObject.__init__(self, **kwargs)
4065                 if self.priority is None:
4066                         self.priority = DepPriority()
4067                 if self.depth is None:
4068                         self.depth = 0
4069
4070 class BlockerCache(portage.cache.mappings.MutableMapping):
4071         """This caches blockers of installed packages so that dep_check does not
4072         have to be done for every single installed package on every invocation of
4073         emerge.  The cache is invalidated whenever it is detected that something
4074         has changed that might alter the results of dep_check() calls:
4075                 1) the set of installed packages (including COUNTER) has changed
4076                 2) the old-style virtuals have changed
4077         """
4078
4079         # Number of uncached packages to trigger cache update, since
4080         # it's wasteful to update it for every vdb change.
4081         _cache_threshold = 5
4082
4083         class BlockerData(object):
4084
4085                 __slots__ = ("__weakref__", "atoms", "counter")
4086
4087                 def __init__(self, counter, atoms):
4088                         self.counter = counter
4089                         self.atoms = atoms
4090
4091         def __init__(self, myroot, vardb):
4092                 self._vardb = vardb
4093                 self._virtuals = vardb.settings.getvirtuals()
4094                 self._cache_filename = os.path.join(myroot,
4095                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4096                 self._cache_version = "1"
4097                 self._cache_data = None
4098                 self._modified = set()
4099                 self._load()
4100
4101         def _load(self):
4102                 try:
4103                         f = open(self._cache_filename, mode='rb')
4104                         mypickle = pickle.Unpickler(f)
4105                         self._cache_data = mypickle.load()
4106                         f.close()
4107                         del f
4108                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4109                         if isinstance(e, pickle.UnpicklingError):
4110                                 writemsg("!!! Error loading '%s': %s\n" % \
4111                                         (self._cache_filename, str(e)), noiselevel=-1)
4112                         del e
4113
4114                 cache_valid = self._cache_data and \
4115                         isinstance(self._cache_data, dict) and \
4116                         self._cache_data.get("version") == self._cache_version and \
4117                         isinstance(self._cache_data.get("blockers"), dict)
4118                 if cache_valid:
4119                         # Validate all the atoms and counters so that
4120                         # corruption is detected as soon as possible.
4121                         invalid_items = set()
4122                         for k, v in self._cache_data["blockers"].iteritems():
4123                                 if not isinstance(k, basestring):
4124                                         invalid_items.add(k)
4125                                         continue
4126                                 try:
4127                                         if portage.catpkgsplit(k) is None:
4128                                                 invalid_items.add(k)
4129                                                 continue
4130                                 except portage.exception.InvalidData:
4131                                         invalid_items.add(k)
4132                                         continue
4133                                 if not isinstance(v, tuple) or \
4134                                         len(v) != 2:
4135                                         invalid_items.add(k)
4136                                         continue
4137                                 counter, atoms = v
4138                                 if not isinstance(counter, (int, long)):
4139                                         invalid_items.add(k)
4140                                         continue
4141                                 if not isinstance(atoms, (list, tuple)):
4142                                         invalid_items.add(k)
4143                                         continue
4144                                 invalid_atom = False
4145                                 for atom in atoms:
4146                                         if not isinstance(atom, basestring):
4147                                                 invalid_atom = True
4148                                                 break
4149                                         if atom[:1] != "!" or \
4150                                                 not portage.isvalidatom(
4151                                                 atom, allow_blockers=True):
4152                                                 invalid_atom = True
4153                                                 break
4154                                 if invalid_atom:
4155                                         invalid_items.add(k)
4156                                         continue
4157
4158                         for k in invalid_items:
4159                                 del self._cache_data["blockers"][k]
4160                         if not self._cache_data["blockers"]:
4161                                 cache_valid = False
4162
4163                 if not cache_valid:
4164                         self._cache_data = {"version":self._cache_version}
4165                         self._cache_data["blockers"] = {}
4166                         self._cache_data["virtuals"] = self._virtuals
4167                 self._modified.clear()
4168
4169         def flush(self):
4170                 """If the current user has permission and the internal blocker cache
4171                 has been updated, save it to disk and mark it unmodified.  This is
4172                 called by emerge after it has processed blockers for all installed packages.
4173                 Currently, the cache is only written if the user has superuser
4174                 privileges (since that's required to obtain a lock), but all users
4175                 have read access and benefit from faster blocker lookups (as long as
4176                 the entire cache is still valid).  The cache is stored as a pickled
4177                 dict object with the following format:
4178
4179                 {
4180                         version : "1",
4181                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4182                         "virtuals" : vardb.settings.getvirtuals()
4183                 }
4184                 """
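                # A reader of the on-disk cache could, illustratively, recover
                # the structure documented above with:
                #   data = pickle.load(open(self._cache_filename, 'rb'))
                #   counter, atoms = data["blockers"][cpv]
                # where cpv is a hypothetical installed package key.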
4185                 if len(self._modified) >= self._cache_threshold and \
4186                         secpass >= 2:
4187                         try:
4188                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4189                                 pickle.dump(self._cache_data, f, -1)
4190                                 f.close()
4191                                 portage.util.apply_secpass_permissions(
4192                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4193                         except (IOError, OSError), e:
4194                                 pass
4195                         self._modified.clear()
4196
4197         def __setitem__(self, cpv, blocker_data):
4198                 """
4199                 Update the cache and mark it as modified for a future call to
4200                 self.flush().
4201
4202                 @param cpv: Package for which to cache blockers.
4203                 @type cpv: String
4204                 @param blocker_data: An object with counter and atoms attributes.
4205                 @type blocker_data: BlockerData
4206                 """
4207                 self._cache_data["blockers"][cpv] = \
4208                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4209                 self._modified.add(cpv)
4210
4211         def __iter__(self):
4212                 if self._cache_data is None:
4213                         # triggered by python-trace
4214                         return iter([])
4215                 return iter(self._cache_data["blockers"])
4216
4217         def __delitem__(self, cpv):
4218                 del self._cache_data["blockers"][cpv]
4219
4220         def __getitem__(self, cpv):
4221                 """
4222                 @rtype: BlockerData
4223                 @returns: An object with counter and atoms attributes.
4224                 """
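                # The cached value is a (counter, atoms) tuple, so it unpacks
                # directly into BlockerData(counter, atoms).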
4225                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4226
4227 class BlockerDB(object):
4228
4229         def __init__(self, root_config):
4230                 self._root_config = root_config
4231                 self._vartree = root_config.trees["vartree"]
4232                 self._portdb = root_config.trees["porttree"].dbapi
4233
4234                 self._dep_check_trees = None
4235                 self._fake_vartree = None
4236
4237         def _get_fake_vartree(self, acquire_lock=0):
4238                 fake_vartree = self._fake_vartree
4239                 if fake_vartree is None:
4240                         fake_vartree = FakeVartree(self._root_config,
4241                                 acquire_lock=acquire_lock)
4242                         self._fake_vartree = fake_vartree
4243                         self._dep_check_trees = { self._vartree.root : {
4244                                 "porttree"    :  fake_vartree,
4245                                 "vartree"     :  fake_vartree,
4246                         }}
4247                 else:
4248                         fake_vartree.sync(acquire_lock=acquire_lock)
4249                 return fake_vartree
4250
4251         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4252                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4253                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4254                 settings = self._vartree.settings
4255                 stale_cache = set(blocker_cache)
4256                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4257                 dep_check_trees = self._dep_check_trees
4258                 vardb = fake_vartree.dbapi
4259                 installed_pkgs = list(vardb)
4260
4261                 for inst_pkg in installed_pkgs:
4262                         stale_cache.discard(inst_pkg.cpv)
4263                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4264                         if cached_blockers is not None and \
4265                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4266                                 cached_blockers = None
4267                         if cached_blockers is not None:
4268                                 blocker_atoms = cached_blockers.atoms
4269                         else:
4270                                 # Use aux_get() to trigger FakeVartree global
4271                                 # updates on *DEPEND when appropriate.
4272                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4273                                 try:
4274                                         portage.dep._dep_check_strict = False
4275                                         success, atoms = portage.dep_check(depstr,
4276                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4277                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4278                                 finally:
4279                                         portage.dep._dep_check_strict = True
4280                                 if not success:
4281                                         pkg_location = os.path.join(inst_pkg.root,
4282                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4283                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4284                                                 (pkg_location, atoms), noiselevel=-1)
4285                                         continue
4286
4287                                 blocker_atoms = [atom for atom in atoms \
4288                                         if atom.startswith("!")]
4289                                 blocker_atoms.sort()
4290                                 counter = long(inst_pkg.metadata["COUNTER"])
4291                                 blocker_cache[inst_pkg.cpv] = \
4292                                         blocker_cache.BlockerData(counter, blocker_atoms)
4293                 for cpv in stale_cache:
4294                         del blocker_cache[cpv]
4295                 blocker_cache.flush()
4296
4297                 blocker_parents = digraph()
4298                 blocker_atoms = []
4299                 for pkg in installed_pkgs:
4300                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4301                                 blocker_atom = blocker_atom.lstrip("!")
4302                                 blocker_atoms.append(blocker_atom)
4303                                 blocker_parents.add(blocker_atom, pkg)
4304
4305                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4306                 blocking_pkgs = set()
4307                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4308                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4309
4310                 # Check for blockers in the other direction.
4311                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4312                 try:
4313                         portage.dep._dep_check_strict = False
4314                         success, atoms = portage.dep_check(depstr,
4315                                 vardb, settings, myuse=new_pkg.use.enabled,
4316                                 trees=dep_check_trees, myroot=new_pkg.root)
4317                 finally:
4318                         portage.dep._dep_check_strict = True
4319                 if not success:
4320                         # We should never get this far with invalid deps.
4321                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4322                         assert False
4323
4324                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4325                         if atom[:1] == "!"]
4326                 if blocker_atoms:
4327                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4328                         for inst_pkg in installed_pkgs:
4329                                 try:
4330                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4331                                 except (portage.exception.InvalidDependString, StopIteration):
4332                                         continue
4333                                 blocking_pkgs.add(inst_pkg)
4334
4335                 return blocking_pkgs
4336
4337 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4338
4339         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4340                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4341         p_type, p_root, p_key, p_status = parent_node
4342         msg = []
4343         if p_status == "nomerge":
4344                 category, pf = portage.catsplit(p_key)
4345                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4346                 msg.append("Portage is unable to process the dependencies of the ")
4347                 msg.append("'%s' package. " % p_key)
4348                 msg.append("In order to correct this problem, the package ")
4349                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4350                 msg.append("As a temporary workaround, the --nodeps option can ")
4351                 msg.append("be used to ignore all dependencies.  For reference, ")
4352                 msg.append("the problematic dependencies can be found in the ")
4353                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4354         else:
4355                 msg.append("This package can not be installed. ")
4356                 msg.append("Please notify the '%s' package maintainer " % p_key)
4357                 msg.append("about this problem.")
4358
4359         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4360         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4361
4362 class PackageVirtualDbapi(portage.dbapi):
4363         """
4364         A dbapi-like interface class that represents the state of the installed
4365         package database as new packages are installed, replacing any packages
4366         that previously existed in the same slot. The main difference between
4367         this class and fakedbapi is that this one uses Package instances
4368         internally (passed in via cpv_inject() and cpv_remove() calls).
4369         """
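        # Illustrative usage, assuming `pkg` is a Package instance:
        #   fakedb = PackageVirtualDbapi(settings)
        #   fakedb.cpv_inject(pkg)        # displaces any package in the same slot
        #   fakedb.match(pkg.slot_atom)   # -> list of matching cpv strings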
4370         def __init__(self, settings):
4371                 portage.dbapi.__init__(self)
4372                 self.settings = settings
4373                 self._match_cache = {}
4374                 self._cp_map = {}
4375                 self._cpv_map = {}
4376
4377         def clear(self):
4378                 """
4379                 Remove all packages.
4380                 """
4381                 if self._cpv_map:
4382                         self._clear_cache()
4383                         self._cp_map.clear()
4384                         self._cpv_map.clear()
4385
4386         def copy(self):
4387                 obj = PackageVirtualDbapi(self.settings)
4388                 obj._match_cache = self._match_cache.copy()
4389                 obj._cp_map = self._cp_map.copy()
4390                 for k, v in obj._cp_map.iteritems():
4391                         obj._cp_map[k] = v[:]
4392                 obj._cpv_map = self._cpv_map.copy()
4393                 return obj
4394
4395         def __iter__(self):
4396                 return self._cpv_map.itervalues()
4397
4398         def __contains__(self, item):
4399                 existing = self._cpv_map.get(item.cpv)
4400                 if existing is not None and \
4401                         existing == item:
4402                         return True
4403                 return False
4404
4405         def get(self, item, default=None):
4406                 cpv = getattr(item, "cpv", None)
4407                 if cpv is None:
4408                         if len(item) != 4:
4409                                 return default
4410                         type_name, root, cpv, operation = item
4411
4412                 existing = self._cpv_map.get(cpv)
4413                 if existing is not None and \
4414                         existing == item:
4415                         return existing
4416                 return default
4417
4418         def match_pkgs(self, atom):
4419                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4420
4421         def _clear_cache(self):
4422                 if self._categories is not None:
4423                         self._categories = None
4424                 if self._match_cache:
4425                         self._match_cache = {}
4426
4427         def match(self, origdep, use_cache=1):
4428                 result = self._match_cache.get(origdep)
4429                 if result is not None:
4430                         return result[:]
4431                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4432                 self._match_cache[origdep] = result
4433                 return result[:]
4434
4435         def cpv_exists(self, cpv):
4436                 return cpv in self._cpv_map
4437
4438         def cp_list(self, mycp, use_cache=1):
4439                 cachelist = self._match_cache.get(mycp)
4440                 # cp_list() doesn't expand old-style virtuals
4441                 if cachelist and cachelist[0].startswith(mycp):
4442                         return cachelist[:]
4443                 cpv_list = self._cp_map.get(mycp)
4444                 if cpv_list is None:
4445                         cpv_list = []
4446                 else:
4447                         cpv_list = [pkg.cpv for pkg in cpv_list]
4448                 self._cpv_sort_ascending(cpv_list)
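                # Note (explanatory): an empty result for a virtual/* category
                # is deliberately not cached, since old-style virtuals may be
                # satisfied by providers outside the virtual/ category and the
                # cache is shared with match() (see the startswith() guard above).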
4449                 if not (not cpv_list and mycp.startswith("virtual/")):
4450                         self._match_cache[mycp] = cpv_list
4451                 return cpv_list[:]
4452
4453         def cp_all(self):
4454                 return list(self._cp_map)
4455
4456         def cpv_all(self):
4457                 return list(self._cpv_map)
4458
4459         def cpv_inject(self, pkg):
4460                 cp_list = self._cp_map.get(pkg.cp)
4461                 if cp_list is None:
4462                         cp_list = []
4463                         self._cp_map[pkg.cp] = cp_list
4464                 e_pkg = self._cpv_map.get(pkg.cpv)
4465                 if e_pkg is not None:
4466                         if e_pkg == pkg:
4467                                 return
4468                         self.cpv_remove(e_pkg)
4469                 for e_pkg in cp_list:
4470                         if e_pkg.slot_atom == pkg.slot_atom:
4471                                 if e_pkg == pkg:
4472                                         return
4473                                 self.cpv_remove(e_pkg)
4474                                 break
4475                 cp_list.append(pkg)
4476                 self._cpv_map[pkg.cpv] = pkg
4477                 self._clear_cache()
4478
4479         def cpv_remove(self, pkg):
4480                 old_pkg = self._cpv_map.get(pkg.cpv)
4481                 if old_pkg != pkg:
4482                         raise KeyError(pkg)
4483                 self._cp_map[pkg.cp].remove(pkg)
4484                 del self._cpv_map[pkg.cpv]
4485                 self._clear_cache()
4486
4487         def aux_get(self, cpv, wants):
4488                 metadata = self._cpv_map[cpv].metadata
4489                 return [metadata.get(x, "") for x in wants]
4490
4491         def aux_update(self, cpv, values):
4492                 self._cpv_map[cpv].metadata.update(values)
4493                 self._clear_cache()
4494
4495 class depgraph(object):
4496
4497         pkg_tree_map = RootConfig.pkg_tree_map
4498
4499         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4500
4501         def __init__(self, settings, trees, myopts, myparams, spinner):
4502                 self.settings = settings
4503                 self.target_root = settings["ROOT"]
4504                 self.myopts = myopts
4505                 self.myparams = myparams
4506                 self.edebug = 0
4507                 if settings.get("PORTAGE_DEBUG", "") == "1":
4508                         self.edebug = 1
4509                 self.spinner = spinner
4510                 self._running_root = trees["/"]["root_config"]
4511                 self._opts_no_restart = Scheduler._opts_no_restart
4512                 self.pkgsettings = {}
4513                 # Maps slot atom to package for each Package added to the graph.
4514                 self._slot_pkg_map = {}
4515                 # Maps nodes to the reasons they were selected for reinstallation.
4516                 self._reinstall_nodes = {}
4517                 self.mydbapi = {}
4518                 self.trees = {}
4519                 self._trees_orig = trees
4520                 self.roots = {}
4521                 # Contains a filtered view of preferred packages that are selected
4522                 # from available repositories.
4523                 self._filtered_trees = {}
4524                 # Contains installed packages and new packages that have been added
4525                 # to the graph.
4526                 self._graph_trees = {}
4527                 # All Package instances
4528                 self._pkg_cache = {}
4529                 for myroot in trees:
4530                         self.trees[myroot] = {}
4531                         # Create a RootConfig instance that references
4532                         # the FakeVartree instead of the real one.
4533                         self.roots[myroot] = RootConfig(
4534                                 trees[myroot]["vartree"].settings,
4535                                 self.trees[myroot],
4536                                 trees[myroot]["root_config"].setconfig)
4537                         for tree in ("porttree", "bintree"):
4538                                 self.trees[myroot][tree] = trees[myroot][tree]
4539                         self.trees[myroot]["vartree"] = \
4540                                 FakeVartree(trees[myroot]["root_config"],
4541                                         pkg_cache=self._pkg_cache)
4542                         self.pkgsettings[myroot] = portage.config(
4543                                 clone=self.trees[myroot]["vartree"].settings)
4544                         self._slot_pkg_map[myroot] = {}
4545                         vardb = self.trees[myroot]["vartree"].dbapi
4546                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4547                                 "--buildpkgonly" not in self.myopts
4548                         # This fakedbapi instance will model the state that the vdb will
4549                         # have after new packages have been installed.
4550                         fakedb = PackageVirtualDbapi(vardb.settings)
4551                         if preload_installed_pkgs:
4552                                 for pkg in vardb:
4553                                         self.spinner.update()
4554                                         # This triggers metadata updates via FakeVartree.
4555                                         vardb.aux_get(pkg.cpv, [])
4556                                         fakedb.cpv_inject(pkg)
4557
4558                         # Now that the vardb state is cached in our FakeVartree,
4559                         # we won't be needing the real vartree cache for a while.
4560                         # To make some room on the heap, clear the vardbapi
4561                         # caches.
4562                         trees[myroot]["vartree"].dbapi._clear_cache()
4563                         gc.collect()
4564
4565                         self.mydbapi[myroot] = fakedb
4566                         def graph_tree():
4567                                 pass
4568                         graph_tree.dbapi = fakedb
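                        # graph_tree is a minimal stand-in for a tree object:
                        # dep_check() only needs its .dbapi attribute here, so a
                        # throwaway function object with dbapi attached suffices.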
4569                         self._graph_trees[myroot] = {}
4570                         self._filtered_trees[myroot] = {}
4571                         # Substitute the graph tree for the vartree in dep_check() since we
4572                         # want atom selections to be consistent with package selections
4573                         # that have already been made.
4574                         self._graph_trees[myroot]["porttree"]   = graph_tree
4575                         self._graph_trees[myroot]["vartree"]    = graph_tree
4576                         def filtered_tree():
4577                                 pass
4578                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4579                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4580
4581                         # Passing in graph_tree as the vartree here could lead to better
4582                         # atom selections in some cases by causing atoms for packages that
4583                         # have been added to the graph to be preferred over other choices.
4584                         # However, it can trigger atom selections that result in
4585                         # unresolvable direct circular dependencies. For example, this
4586                         # happens with gwydion-dylan which depends on either itself or
4587                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4588                         # gwydion-dylan-bin needs to be selected in order to avoid
4589                         # an unresolvable direct circular dependency.
4590                         #
4591                         # To solve the problem described above, pass in "graph_db" so that
4592                         # packages that have been added to the graph are distinguishable
4593                         # from other available packages and installed packages. Also, pass
4594                         # the parent package into self._select_atoms() calls so that
4595                         # unresolvable direct circular dependencies can be detected and
4596                         # avoided when possible.
4597                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4598                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4599
4600                         dbs = []
4601                         portdb = self.trees[myroot]["porttree"].dbapi
4602                         bindb  = self.trees[myroot]["bintree"].dbapi
4603                         vardb  = self.trees[myroot]["vartree"].dbapi
4604                         #               (db, pkg_type, built, installed, db_keys)
4605                         if "--usepkgonly" not in self.myopts:
4606                                 db_keys = list(portdb._aux_cache_keys)
4607                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4608                         if "--usepkg" in self.myopts:
4609                                 db_keys = list(bindb._aux_cache_keys)
4610                                 dbs.append((bindb,  "binary", True, False, db_keys))
4611                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4612                         dbs.append((vardb, "installed", True, True, db_keys))
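                        # Depending on --usepkgonly/--usepkg, dbs is assembled in
                        # the order: ebuild (portdb), binary (bindb), installed
                        # (vardb); the installed vardb entry is always appended.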
4613                         self._filtered_trees[myroot]["dbs"] = dbs
4614                         if "--usepkg" in self.myopts:
4615                                 self.trees[myroot]["bintree"].populate(
4616                                         "--getbinpkg" in self.myopts,
4617                                         "--getbinpkgonly" in self.myopts)
4618                 del trees
4619
4620                 self.digraph=portage.digraph()
4621                 # contains all sets added to the graph
4622                 self._sets = {}
4623                 # contains atoms given as arguments
4624                 self._sets["args"] = InternalPackageSet()
4625                 # contains all atoms from all sets added to the graph, including
4626                 # atoms given as arguments
4627                 self._set_atoms = InternalPackageSet()
4628                 self._atom_arg_map = {}
4629                 # contains all nodes pulled in by self._set_atoms
4630                 self._set_nodes = set()
4631                 # Contains only Blocker -> Uninstall edges
4632                 self._blocker_uninstalls = digraph()
4633                 # Contains only Package -> Blocker edges
4634                 self._blocker_parents = digraph()
4635                 # Contains only irrelevant Package -> Blocker edges
4636                 self._irrelevant_blockers = digraph()
4637                 # Contains only unsolvable Package -> Blocker edges
4638                 self._unsolvable_blockers = digraph()
4639                 # Contains all Blocker -> Blocked Package edges
4640                 self._blocked_pkgs = digraph()
4641                 # Contains world packages that have been protected from
4642                 # uninstallation but may not have been added to the graph
4643                 # if the graph is not complete yet.
4644                 self._blocked_world_pkgs = {}
4645                 self._slot_collision_info = {}
4646                 # Slot collision nodes are not allowed to block other packages since
4647                 # blocker validation is only able to account for one package per slot.
4648                 self._slot_collision_nodes = set()
4649                 self._parent_atoms = {}
4650                 self._slot_conflict_parent_atoms = set()
4651                 self._serialized_tasks_cache = None
4652                 self._scheduler_graph = None
4653                 self._displayed_list = None
4654                 self._pprovided_args = []
4655                 self._missing_args = []
4656                 self._masked_installed = set()
4657                 self._unsatisfied_deps_for_display = []
4658                 self._unsatisfied_blockers_for_display = None
4659                 self._circular_deps_for_display = None
4660                 self._dep_stack = []
4661                 self._unsatisfied_deps = []
4662                 self._initially_unsatisfied_deps = []
4663                 self._ignored_deps = []
4664                 self._required_set_names = set(["system", "world"])
4665                 self._select_atoms = self._select_atoms_highest_available
4666                 self._select_package = self._select_pkg_highest_available
4667                 self._highest_pkg_cache = {}
4668
4669         def _show_slot_collision_notice(self):
4670                 """Show an informational message advising the user to mask one of the
4671                 packages. In some cases it may be possible to resolve this
4672                 automatically, but support for backtracking (removal of nodes that have
4673                 already been selected) will be required in order to handle all possible
4674                 cases.
4675                 """
4676
4677                 if not self._slot_collision_info:
4678                         return
4679
4680                 self._show_merge_list()
4681
4682                 msg = []
4683                 msg.append("\n!!! Multiple package instances within a single " + \
4684                         "package slot have been pulled\n")
4685                 msg.append("!!! into the dependency graph, resulting" + \
4686                         " in a slot conflict:\n\n")
4687                 indent = "  "
4688                 # Max number of parents shown, to avoid flooding the display.
4689                 max_parents = 3
4690                 explanation_columns = 70
4691                 explanations = 0
4692                 for (slot_atom, root), slot_nodes \
4693                         in self._slot_collision_info.iteritems():
4694                         msg.append(str(slot_atom))
4695                         msg.append("\n\n")
4696
4697                         for node in slot_nodes:
4698                                 msg.append(indent)
4699                                 msg.append(str(node))
4700                                 parent_atoms = self._parent_atoms.get(node)
4701                                 if parent_atoms:
4702                                         pruned_list = set()
4703                                         # Prefer conflict atoms over others.
4704                                         for parent_atom in parent_atoms:
4705                                                 if len(pruned_list) >= max_parents:
4706                                                         break
4707                                                 if parent_atom in self._slot_conflict_parent_atoms:
4708                                                         pruned_list.add(parent_atom)
4709
4710                                         # If this package was pulled in by conflict atoms then
4711                                         # show those alone since those are the most interesting.
4712                                         if not pruned_list:
4713                                                 # When generating the pruned list, prefer instances
4714                                                 # of DependencyArg over instances of Package.
4715                                                 for parent_atom in parent_atoms:
4716                                                         if len(pruned_list) >= max_parents:
4717                                                                 break
4718                                                         parent, atom = parent_atom
4719                                                         if isinstance(parent, DependencyArg):
4720                                                                 pruned_list.add(parent_atom)
4721                                                 # Prefer Package instances that themselves have been
4722                                                 # pulled into collision slots.
4723                                                 for parent_atom in parent_atoms:
4724                                                         if len(pruned_list) >= max_parents:
4725                                                                 break
4726                                                         parent, atom = parent_atom
4727                                                         if isinstance(parent, Package) and \
4728                                                                 (parent.slot_atom, parent.root) \
4729                                                                 in self._slot_collision_info:
4730                                                                 pruned_list.add(parent_atom)
4731                                                 for parent_atom in parent_atoms:
4732                                                         if len(pruned_list) >= max_parents:
4733                                                                 break
4734                                                         pruned_list.add(parent_atom)
4735                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4736                                         parent_atoms = pruned_list
4737                                         msg.append(" pulled in by\n")
4738                                         for parent_atom in parent_atoms:
4739                                                 parent, atom = parent_atom
4740                                                 msg.append(2*indent)
4741                                                 if isinstance(parent,
4742                                                         (PackageArg, AtomArg)):
4743                                                         # For PackageArg and AtomArg types, it's
4744                                                         # redundant to display the atom attribute.
4745                                                         msg.append(str(parent))
4746                                                 else:
4747                                                         # Display the specific atom from SetArg or
4748                                                         # Package types.
4749                                                         msg.append("%s required by %s" % (atom, parent))
4750                                                 msg.append("\n")
4751                                         if omitted_parents:
4752                                                 msg.append(2*indent)
4753                                                 msg.append("(and %d more)\n" % omitted_parents)
4754                                 else:
4755                                         msg.append(" (no parents)\n")
4756                                 msg.append("\n")
4757                         explanation = self._slot_conflict_explanation(slot_nodes)
4758                         if explanation:
4759                                 explanations += 1
4760                                 msg.append(indent + "Explanation:\n\n")
4761                                 for line in textwrap.wrap(explanation, explanation_columns):
4762                                         msg.append(2*indent + line + "\n")
4763                                 msg.append("\n")
4764                 msg.append("\n")
4765                 sys.stderr.write("".join(msg))
4766                 sys.stderr.flush()
4767
4768                 explanations_for_all = explanations == len(self._slot_collision_info)
4769
4770                 if explanations_for_all or "--quiet" in self.myopts:
4771                         return
4772
4773                 msg = []
4774                 msg.append("It may be possible to solve this problem ")
4775                 msg.append("by using package.mask to prevent one of ")
4776                 msg.append("those packages from being selected. ")
4777                 msg.append("However, it is also possible that conflicting ")
4778                 msg.append("dependencies exist such that they are impossible to ")
4779                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4780                 msg.append("the dependencies of two different packages, then those ")
4781                 msg.append("packages cannot be installed simultaneously.")
4782
4783                 from formatter import AbstractFormatter, DumbWriter
4784                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4785                 for x in msg:
4786                         f.add_flowing_data(x)
4787                 f.end_paragraph(1)
4788
4789                 msg = []
4790                 msg.append("For more information, see the MASKED PACKAGES ")
4791                 msg.append("section in the emerge man page or refer ")
4792                 msg.append("to the Gentoo Handbook.")
4793                 for x in msg:
4794                         f.add_flowing_data(x)
4795                 f.end_paragraph(1)
4796                 f.writer.flush()
4797
4798         def _slot_conflict_explanation(self, slot_nodes):
4799                 """
4800                 When a slot conflict occurs due to USE deps, there are a few
4801                 different cases to consider:
4802
4803                 1) New USE are correctly set but --newuse wasn't requested so an
4804                    installed package with incorrect USE happened to get pulled
4805                    into the graph before the new one.
4806
4807                 2) New USE are incorrectly set but an installed package has correct
4808                    USE so it got pulled into the graph, and a new instance also got
4809                    pulled in due to --newuse or an upgrade.
4810
4811                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4812                    and multiple package instances got pulled into the same slot to
4813                    satisfy the conflicting deps.
4814
4815                 Currently, explanations and suggested courses of action are generated
4816                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4817                 """
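                     # Illustrative example of case 1 (hypothetical packages): an
                     # installed dev-libs/foo-1.0 was built with USE="-ssl" while a
                     # parent now requires dev-libs/foo[ssl]. Without --newuse the
                     # installed instance is pulled into the graph first, and the
                     # USE-conditional atom then pulls a second instance into the
                     # same slot. Case 2 is the mirror image: the new USE settings
                     # are wrong, so only the installed instance satisfies the
                     # USE dep.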
4818
4819                 if len(slot_nodes) != 2:
4820                         # Suggestions are only implemented for
4821                         # conflicts between two packages.
4822                         return None
4823
4824                 all_conflict_atoms = self._slot_conflict_parent_atoms
4825                 matched_node = None
4826                 matched_atoms = None
4827                 unmatched_node = None
4828                 for node in slot_nodes:
4829                         parent_atoms = self._parent_atoms.get(node)
4830                         if not parent_atoms:
4831                                 # Normally, there are always parent atoms. If there are
4832                                 # none then something unexpected is happening and there's
4833                                 # currently no suggestion for this case.
4834                                 return None
4835                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4836                         for parent_atom in conflict_atoms:
4837                                 parent, atom = parent_atom
4838                                 if not atom.use:
4839                                         # Suggestions are currently only implemented for cases
4840                                         # in which all conflict atoms have USE deps.
4841                                         return None
4842                         if conflict_atoms:
4843                                 if matched_node is not None:
4844                                         # If conflict atoms match multiple nodes
4845                                         # then there's no suggestion.
4846                                         return None
4847                                 matched_node = node
4848                                 matched_atoms = conflict_atoms
4849                         else:
4850                                 if unmatched_node is not None:
4851                                         # Neither node is matched by conflict atoms, and
4852                                         # there is no suggestion for this case.
4853                                         return None
4854                                 unmatched_node = node
4855
4856                 if matched_node is None or unmatched_node is None:
4857                         # This shouldn't happen.
4858                         return None
4859
4860                 if unmatched_node.installed and not matched_node.installed:
4861                         return "New USE are correctly set, but --newuse wasn't" + \
4862                                 " requested, so an installed package with incorrect USE " + \
4863                                 "happened to get pulled into the dependency graph. " + \
4864                                 "In order to solve " + \
4865                                 "this, either specify the --newuse option or explicitly " + \
4866                                 "reinstall '%s'." % matched_node.slot_atom
4867
4868                 if matched_node.installed and not unmatched_node.installed:
4869                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4870                         explanation = ("New USE for '%s' are incorrectly set. " + \
4871                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4872                                 (matched_node.slot_atom, atoms[0])
4873                         if len(atoms) > 1:
4874                                 for atom in atoms[1:-1]:
4875                                         explanation += ", '%s'" % (atom,)
4876                                 if len(atoms) > 2:
4877                                         explanation += ","
4878                                 explanation += " and '%s'" % (atoms[-1],)
4879                         explanation += "."
4880                         return explanation
4881
4882                 return None
4883
4884         def _process_slot_conflicts(self):
4885                 """
4886                 Process slot conflict data to identify specific atoms which
4887                 lead to conflict. These atoms only match a subset of the
4888                 packages that have been pulled into a given slot.
4889                 """
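                     # For example (hypothetical data), if dev-libs/foo-1 and
                     # dev-libs/foo-2 have both been pulled into slot "0", a parent
                     # atom such as >=dev-libs/foo-2 matches only one of them and is
                     # therefore recorded in self._slot_conflict_parent_atoms, while
                     # an atom like dev-libs/foo matches both and is simply shared
                     # between the two nodes.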
4890                 for (slot_atom, root), slot_nodes \
4891                         in self._slot_collision_info.iteritems():
4892
4893                         all_parent_atoms = set()
4894                         for pkg in slot_nodes:
4895                                 parent_atoms = self._parent_atoms.get(pkg)
4896                                 if not parent_atoms:
4897                                         continue
4898                                 all_parent_atoms.update(parent_atoms)
4899
4900                         for pkg in slot_nodes:
4901                                 parent_atoms = self._parent_atoms.get(pkg)
4902                                 if parent_atoms is None:
4903                                         parent_atoms = set()
4904                                         self._parent_atoms[pkg] = parent_atoms
4905                                 for parent_atom in all_parent_atoms:
4906                                         if parent_atom in parent_atoms:
4907                                                 continue
4908                                         # Use package set for matching since it will match via
4909                                         # PROVIDE when necessary, while match_from_list does not.
4910                                         parent, atom = parent_atom
4911                                         atom_set = InternalPackageSet(
4912                                                 initial_atoms=(atom,))
4913                                         if atom_set.findAtomForPackage(pkg):
4914                                                 parent_atoms.add(parent_atom)
4915                                         else:
4916                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4917
4918         def _reinstall_for_flags(self, forced_flags,
4919                 orig_use, orig_iuse, cur_use, cur_iuse):
4920                 """Return a set of flags that trigger reinstallation, or None if there
4921                 are no such flags."""
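                     # Worked example (illustrative flag sets, not real data):
                     # with --newuse, given
                     #   orig_iuse = set(["ssl", "gtk"])   orig_use = set(["ssl"])
                     #   cur_iuse  = set(["ssl", "gtk"])   cur_use  = set(["ssl", "gtk"])
                     # the IUSE symmetric difference is empty, but the enabled-flag
                     # comparison yields set(["gtk"]), so "gtk" triggers a reinstall.
                     # With --reinstall=changed-use only the enabled-flag comparison
                     # is performed, so flags that merely appear in or vanish from
                     # IUSE do not by themselves force a rebuild.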
4922                 if "--newuse" in self.myopts:
4923                         flags = set(orig_iuse.symmetric_difference(
4924                                 cur_iuse).difference(forced_flags))
4925                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4926                                 cur_iuse.intersection(cur_use)))
4927                         if flags:
4928                                 return flags
4929                 elif "changed-use" == self.myopts.get("--reinstall"):
4930                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4931                                 cur_iuse.intersection(cur_use))
4932                         if flags:
4933                                 return flags
4934                 return None
4935
4936         def _create_graph(self, allow_unsatisfied=False):
4937                 dep_stack = self._dep_stack
4938                 while dep_stack:
4939                         self.spinner.update()
4940                         dep = dep_stack.pop()
4941                         if isinstance(dep, Package):
4942                                 if not self._add_pkg_deps(dep,
4943                                         allow_unsatisfied=allow_unsatisfied):
4944                                         return 0
4945                                 continue
4946                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4947                                 return 0
4948                 return 1
4949
4950         def _add_dep(self, dep, allow_unsatisfied=False):
4951                 debug = "--debug" in self.myopts
4952                 buildpkgonly = "--buildpkgonly" in self.myopts
4953                 nodeps = "--nodeps" in self.myopts
4954                 empty = "empty" in self.myparams
4955                 deep = "deep" in self.myparams
4956                 update = "--update" in self.myopts and dep.depth <= 1
4957                 if dep.blocker:
4958                         if not buildpkgonly and \
4959                                 not nodeps and \
4960                                 dep.parent not in self._slot_collision_nodes:
4961                                 if dep.parent.onlydeps:
4962                                         # It's safe to ignore blockers if the
4963                                         # parent is an --onlydeps node.
4964                                         return 1
4965                                 # The blocker applies to the root where
4966                                 # the parent is or will be installed.
4967                                 blocker = Blocker(atom=dep.atom,
4968                                         eapi=dep.parent.metadata["EAPI"],
4969                                         root=dep.parent.root)
4970                                 self._blocker_parents.add(blocker, dep.parent)
4971                         return 1
4972                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4973                         onlydeps=dep.onlydeps)
4974                 if not dep_pkg:
4975                         if dep.priority.optional:
4976                                 # This could be an unnecessary build-time dep
4977                                 # pulled in by --with-bdeps=y.
4978                                 return 1
4979                         if allow_unsatisfied:
4980                                 self._unsatisfied_deps.append(dep)
4981                                 return 1
4982                         self._unsatisfied_deps_for_display.append(
4983                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4984                         return 0
4985                 # In some cases, dep_check will return deps that shouldn't
4986                 # be processed any further, so they are identified and
4987                 # discarded here. Try to discard as few as possible since
4988                 # discarded dependencies reduce the amount of information
4989                 # available for optimization of merge order.
4990                 if dep.priority.satisfied and \
4991                         not dep_pkg.installed and \
4992                         not (existing_node or empty or deep or update):
4993                         myarg = None
4994                         if dep.root == self.target_root:
4995                                 try:
4996                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4997                                 except StopIteration:
4998                                         pass
4999                                 except portage.exception.InvalidDependString:
5000                                         if not dep_pkg.installed:
5001                                                 # This shouldn't happen since the package
5002                                                 # should have been masked.
5003                                                 raise
5004                         if not myarg:
5005                                 self._ignored_deps.append(dep)
5006                                 return 1
5007
5008                 if not self._add_pkg(dep_pkg, dep):
5009                         return 0
5010                 return 1
5011
5012         def _add_pkg(self, pkg, dep):
5013                 myparent = None
5014                 priority = None
5015                 depth = 0
5016                 if dep is None:
5017                         dep = Dependency()
5018                 else:
5019                         myparent = dep.parent
5020                         priority = dep.priority
5021                         depth = dep.depth
5022                 if priority is None:
5023                         priority = DepPriority()
5024                 """
5025                 Fills the digraph with nodes comprised of packages to merge.
5026                 mybigkey is the package spec of the package to merge.
5027                 myparent is the package depending on mybigkey (or None)
5028                 addme = Should we add this package to the digraph or are we just looking at its deps?
5029                         Think --onlydeps, we need to ignore packages in that case.
5030                 #stuff to add:
5031                 #SLOT-aware emerge
5032                 #IUSE-aware emerge -> USE DEP aware depgraph
5033                 #"no downgrade" emerge
5034                 """
5035                 # Ensure that the dependencies of the same package
5036                 # are never processed more than once.
5037                 previously_added = pkg in self.digraph
5038
5039                 # select the correct /var database that we'll be checking against
5040                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5041                 pkgsettings = self.pkgsettings[pkg.root]
5042
5043                 arg_atoms = None
5044                 if True:
5045                         try:
5046                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5047                         except portage.exception.InvalidDependString, e:
5048                                 if not pkg.installed:
5049                                         show_invalid_depstring_notice(
5050                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5051                                         return 0
5052                                 del e
5053
5054                 if not pkg.onlydeps:
5055                         if not pkg.installed and \
5056                                 "empty" not in self.myparams and \
5057                                 vardbapi.match(pkg.slot_atom):
5058                                 # Increase the priority of dependencies on packages that
5059                                 # are being rebuilt. This optimizes merge order so that
5060                                 # dependencies are rebuilt/updated as soon as possible,
5061                                 # which is needed especially when emerge is called by
5062                                 # revdep-rebuild since dependencies may be affected by ABI
5063                                 # breakage that has rendered them useless. Don't adjust
5064                                 # priority here when in "empty" mode since all packages
5065                                 # are being merged in that case.
5066                                 priority.rebuild = True
5067
5068                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5069                         slot_collision = False
5070                         if existing_node:
5071                                 existing_node_matches = pkg.cpv == existing_node.cpv
5072                                 if existing_node_matches and \
5073                                         pkg != existing_node and \
5074                                         dep.atom is not None:
5075                                         # Use package set for matching since it will match via
5076                                         # PROVIDE when necessary, while match_from_list does not.
5077                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5078                                         if not atom_set.findAtomForPackage(existing_node):
5079                                                 existing_node_matches = False
5080                                 if existing_node_matches:
5081                                         # The existing node can be reused.
5082                                         if arg_atoms:
5083                                                 for parent_atom in arg_atoms:
5084                                                         parent, atom = parent_atom
5085                                                         self.digraph.add(existing_node, parent,
5086                                                                 priority=priority)
5087                                                         self._add_parent_atom(existing_node, parent_atom)
5088                                         # If a direct circular dependency is not an unsatisfied
5089                                         # buildtime dependency then drop it here since otherwise
5090                                         # it can skew the merge order calculation in an unwanted
5091                                         # way.
5092                                         if existing_node != myparent or \
5093                                                 (priority.buildtime and not priority.satisfied):
5094                                                 self.digraph.addnode(existing_node, myparent,
5095                                                         priority=priority)
5096                                                 if dep.atom is not None and dep.parent is not None:
5097                                                         self._add_parent_atom(existing_node,
5098                                                                 (dep.parent, dep.atom))
5099                                         return 1
5100                                 else:
5101
5102                                         # A slot collision has occurred.  Sometimes this coincides
5103                                         # with unresolvable blockers, so the slot collision will be
5104                                         # shown later if there are no unresolvable blockers.
5105                                         self._add_slot_conflict(pkg)
5106                                         slot_collision = True
5107
5108                         if slot_collision:
5109                                 # Now add this node to the graph so that self.display()
5110                                 # can show use flags and --tree output.  This node is
5111                                 # only being partially added to the graph.  It must not be
5112                                 # allowed to interfere with the other nodes that have been
5113                                 # added.  Do not overwrite data for existing nodes in
5114                                 # self.mydbapi since that data will be used for blocker
5115                                 # validation.
5116                                 # Even though the graph is now invalid, continue to process
5117                                 # dependencies so that things like --fetchonly can still
5118                                 # function despite collisions.
5119                                 pass
5120                         elif not previously_added:
5121                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5122                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5123                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5124
5125                         if not pkg.installed:
5126                                 # Allow this package to satisfy old-style virtuals in case it
5127                                 # doesn't already. Any pre-existing providers will be preferred
5128                                 # over this one.
5129                                 try:
5130                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5131                                         # For consistency, also update the global virtuals.
5132                                         settings = self.roots[pkg.root].settings
5133                                         settings.unlock()
5134                                         settings.setinst(pkg.cpv, pkg.metadata)
5135                                         settings.lock()
5136                                 except portage.exception.InvalidDependString, e:
5137                                         show_invalid_depstring_notice(
5138                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5139                                         del e
5140                                         return 0
5141
5142                 if arg_atoms:
5143                         self._set_nodes.add(pkg)
5144
5145                 # Do this even when addme is False (--onlydeps) so that the
5146                 # parent/child relationship is always known in case
5147                 # self._show_slot_collision_notice() needs to be called later.
5148                 self.digraph.add(pkg, myparent, priority=priority)
5149                 if dep.atom is not None and dep.parent is not None:
5150                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5151
5152                 if arg_atoms:
5153                         for parent_atom in arg_atoms:
5154                                 parent, atom = parent_atom
5155                                 self.digraph.add(pkg, parent, priority=priority)
5156                                 self._add_parent_atom(pkg, parent_atom)
5157
5158                 """ This section determines whether we go deeper into dependencies or not.
5159                     We want to go deeper on a few occasions:
5160                     When installing package A, we need to make sure package A's deps are met.
5161                     With emerge --deep <pkgspec>, we need to recursively check the dependencies of pkgspec.
5162                     If we are in --nodeps (no recursion) mode, we obviously only check one level of dependencies.
5163                 """
5164                 dep_stack = self._dep_stack
5165                 if "recurse" not in self.myparams:
5166                         return 1
5167                 elif pkg.installed and \
5168                         "deep" not in self.myparams:
5169                         dep_stack = self._ignored_deps
5170
5171                 self.spinner.update()
5172
5173                 if arg_atoms:
5174                         depth = 0
5175                 pkg.depth = depth
5176                 if not previously_added:
5177                         dep_stack.append(pkg)
5178                 return 1
5179
5180         def _add_parent_atom(self, pkg, parent_atom):
5181                 parent_atoms = self._parent_atoms.get(pkg)
5182                 if parent_atoms is None:
5183                         parent_atoms = set()
5184                         self._parent_atoms[pkg] = parent_atoms
5185                 parent_atoms.add(parent_atom)
5186
5187         def _add_slot_conflict(self, pkg):
5188                 self._slot_collision_nodes.add(pkg)
5189                 slot_key = (pkg.slot_atom, pkg.root)
5190                 slot_nodes = self._slot_collision_info.get(slot_key)
5191                 if slot_nodes is None:
5192                         slot_nodes = set()
5193                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5194                         self._slot_collision_info[slot_key] = slot_nodes
5195                 slot_nodes.add(pkg)
5196
5197         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5198
5199                 mytype = pkg.type_name
5200                 myroot = pkg.root
5201                 mykey = pkg.cpv
5202                 metadata = pkg.metadata
5203                 myuse = pkg.use.enabled
5204                 jbigkey = pkg
5205                 depth = pkg.depth + 1
5206                 removal_action = "remove" in self.myparams
5207
5208                 edepend={}
5209                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5210                 for k in depkeys:
5211                         edepend[k] = metadata[k]
5212
5213                 if not pkg.built and \
5214                         "--buildpkgonly" in self.myopts and \
5215                         "deep" not in self.myparams and \
5216                         "empty" not in self.myparams:
5217                         edepend["RDEPEND"] = ""
5218                         edepend["PDEPEND"] = ""
5219                 bdeps_optional = False
5220
5221                 if pkg.built and not removal_action:
5222                         if self.myopts.get("--with-bdeps", "n") == "y":
5223                                 # Pull in build time deps as requested, but mark them as
5224                                 # "optional" since they are not strictly required. This allows
5225                                 # more freedom in the merge order calculation for solving
5226                                 # circular dependencies. Don't convert to PDEPEND since that
5227                                 # could make --with-bdeps=y less effective if it is used to
5228                                 # adjust merge order to prevent built_with_use() calls from
5229                                 # failing.
5230                                 bdeps_optional = True
5231                         else:
5232                                 # Built packages do not have build time dependencies.
5233                                 edepend["DEPEND"] = ""
5234
5235                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5236                         edepend["DEPEND"] = ""
5237
5238                 deps = (
5239                         ("/", edepend["DEPEND"],
5240                                 self._priority(buildtime=(not bdeps_optional),
5241                                 optional=bdeps_optional)),
5242                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5243                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5244                 )
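                     # In other words (assuming a typical ROOT != "/" cross-merge):
                     # DEPEND is always resolved against "/" since build-time
                     # dependencies must be usable on the build host, while RDEPEND
                     # and PDEPEND are resolved against the package's own root with
                     # runtime and runtime_post priorities respectively. When
                     # bdeps_optional is set above, DEPEND entries carry an optional
                     # priority instead of a hard buildtime one.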
5245
5246                 debug = "--debug" in self.myopts
5247                 strict = mytype != "installed"
5248                 try:
5249                         for dep_root, dep_string, dep_priority in deps:
5250                                 if not dep_string:
5251                                         continue
5252                                 if debug:
5253                                         print
5254                                         print "Parent:   ", jbigkey
5255                                         print "Depstring:", dep_string
5256                                         print "Priority:", dep_priority
5257                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5258                                 try:
5259                                         selected_atoms = self._select_atoms(dep_root,
5260                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5261                                                 priority=dep_priority)
5262                                 except portage.exception.InvalidDependString, e:
5263                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5264                                         return 0
5265                                 if debug:
5266                                         print "Candidates:", selected_atoms
5267
5268                                 for atom in selected_atoms:
5269                                         try:
5270
5271                                                 atom = portage.dep.Atom(atom)
5272
5273                                                 mypriority = dep_priority.copy()
5274                                                 if not atom.blocker and vardb.match(atom):
5275                                                         mypriority.satisfied = True
5276
5277                                                 if not self._add_dep(Dependency(atom=atom,
5278                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5279                                                         priority=mypriority, root=dep_root),
5280                                                         allow_unsatisfied=allow_unsatisfied):
5281                                                         return 0
5282
5283                                         except portage.exception.InvalidAtom, e:
5284                                                 show_invalid_depstring_notice(
5285                                                         pkg, dep_string, str(e))
5286                                                 del e
5287                                                 if not pkg.installed:
5288                                                         return 0
5289
5290                                 if debug:
5291                                         print "Exiting...", jbigkey
5292                 except portage.exception.AmbiguousPackageName, e:
5293                         pkgs = e.args[0]
5294                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5295                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5296                         for cpv in pkgs:
5297                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5298                         portage.writemsg("\n", noiselevel=-1)
5299                         if mytype == "binary":
5300                                 portage.writemsg(
5301                                         "!!! This binary package cannot be installed: '%s'\n" % \
5302                                         mykey, noiselevel=-1)
5303                         elif mytype == "ebuild":
5304                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5305                                 myebuild, mylocation = portdb.findname2(mykey)
5306                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5307                                         "'%s'\n" % myebuild, noiselevel=-1)
5308                         portage.writemsg("!!! Please notify the package maintainer " + \
5309                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5310                         return 0
5311                 return 1
5312
5313         def _priority(self, **kwargs):
5314                 if "remove" in self.myparams:
5315                         priority_constructor = UnmergeDepPriority
5316                 else:
5317                         priority_constructor = DepPriority
5318                 return priority_constructor(**kwargs)
5319
5320         def _dep_expand(self, root_config, atom_without_category):
5321                 """
5322                 @param root_config: a root config instance
5323                 @type root_config: RootConfig
5324                 @param atom_without_category: an atom without a category component
5325                 @type atom_without_category: String
5326                 @rtype: list
5327                 @returns: a list of atoms containing categories (possibly empty)
5328                 """
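                     # Example (hypothetical tree contents): expanding "foo" would
                     # return ["dev-libs/foo"] if only the dev-libs category
                     # contains a matching package, or both "dev-libs/foo" and
                     # "app-misc/foo" (leaving the caller to handle the ambiguity)
                     # if both categories provide it.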
5329                 null_cp = portage.dep_getkey(insert_category_into_atom(
5330                         atom_without_category, "null"))
5331                 cat, atom_pn = portage.catsplit(null_cp)
5332
5333                 dbs = self._filtered_trees[root_config.root]["dbs"]
5334                 categories = set()
5335                 for db, pkg_type, built, installed, db_keys in dbs:
5336                         for cat in db.categories:
5337                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5338                                         categories.add(cat)
5339
5340                 deps = []
5341                 for cat in categories:
5342                         deps.append(insert_category_into_atom(
5343                                 atom_without_category, cat))
5344                 return deps
5345
5346         def _have_new_virt(self, root, atom_cp):
5347                 ret = False
5348                 for db, pkg_type, built, installed, db_keys in \
5349                         self._filtered_trees[root]["dbs"]:
5350                         if db.cp_list(atom_cp):
5351                                 ret = True
5352                                 break
5353                 return ret
5354
5355         def _iter_atoms_for_pkg(self, pkg):
5356                 # TODO: add multiple $ROOT support
5357                 if pkg.root != self.target_root:
5358                         return
5359                 atom_arg_map = self._atom_arg_map
5360                 root_config = self.roots[pkg.root]
5361                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5362                         atom_cp = portage.dep_getkey(atom)
5363                         if atom_cp != pkg.cp and \
5364                                 self._have_new_virt(pkg.root, atom_cp):
5365                                 continue
5366                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5367                         visible_pkgs.reverse() # descending order
5368                         higher_slot = None
5369                         for visible_pkg in visible_pkgs:
5370                                 if visible_pkg.cp != atom_cp:
5371                                         continue
5372                                 if pkg >= visible_pkg:
5373                                         # This is descending order, and we're not
5374                                         # interested in any versions <= pkg given.
5375                                         break
5376                                 if pkg.slot_atom != visible_pkg.slot_atom:
5377                                         higher_slot = visible_pkg
5378                                         break
5379                         if higher_slot is not None:
5380                                 continue
5381                         for arg in atom_arg_map[(atom, pkg.root)]:
5382                                 if isinstance(arg, PackageArg) and \
5383                                         arg.package != pkg:
5384                                         continue
5385                                 yield arg, atom
5386
5387         def select_files(self, myfiles):
5388                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5389                 appropriate depgraph and return a favorite list."""
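                     # The entries in myfiles may be a mix of argument types, e.g.
                     # (hypothetical values): a binary package path such as
                     # "foo-1.0.tbz2", an ebuild path such as
                     # "./app-misc/foo/foo-1.0.ebuild", a set name such as "world"
                     # or SETPREFIX + "myset", a plain or versioned atom such as
                     # "app-misc/foo", or an absolute path owned by an installed
                     # package such as "/usr/bin/foo".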
5390                 debug = "--debug" in self.myopts
5391                 root_config = self.roots[self.target_root]
5392                 sets = root_config.sets
5393                 getSetAtoms = root_config.setconfig.getSetAtoms
5394                 myfavorites=[]
5395                 myroot = self.target_root
5396                 dbs = self._filtered_trees[myroot]["dbs"]
5397                 vardb = self.trees[myroot]["vartree"].dbapi
5398                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5399                 portdb = self.trees[myroot]["porttree"].dbapi
5400                 bindb = self.trees[myroot]["bintree"].dbapi
5401                 pkgsettings = self.pkgsettings[myroot]
5402                 args = []
5403                 onlydeps = "--onlydeps" in self.myopts
5404                 lookup_owners = []
5405                 for x in myfiles:
5406                         ext = os.path.splitext(x)[1]
5407                         if ext==".tbz2":
5408                                 if not os.path.exists(x):
5409                                         if os.path.exists(
5410                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5411                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5412                                         elif os.path.exists(
5413                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5414                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5415                                         else:
5416                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5417                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5418                                                 return 0, myfavorites
5419                                 mytbz2=portage.xpak.tbz2(x)
5420                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5421                                 if os.path.realpath(x) != \
5422                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5423                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5424                                         return 0, myfavorites
5425                                 db_keys = list(bindb._aux_cache_keys)
5426                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5427                                 pkg = Package(type_name="binary", root_config=root_config,
5428                                         cpv=mykey, built=True, metadata=metadata,
5429                                         onlydeps=onlydeps)
5430                                 self._pkg_cache[pkg] = pkg
5431                                 args.append(PackageArg(arg=x, package=pkg,
5432                                         root_config=root_config))
5433                         elif ext==".ebuild":
5434                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5435                                 pkgdir = os.path.dirname(ebuild_path)
5436                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5437                                 cp = pkgdir[len(tree_root)+1:]
5438                                 e = portage.exception.PackageNotFound(
5439                                         ("%s is not in a valid portage tree " + \
5440                                         "hierarchy or does not exist") % x)
5441                                 if not portage.isvalidatom(cp):
5442                                         raise e
5443                                 cat = portage.catsplit(cp)[0]
5444                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5445                                 if not portage.isvalidatom("="+mykey):
5446                                         raise e
5447                                 ebuild_path = portdb.findname(mykey)
5448                                 if ebuild_path:
5449                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5450                                                 cp, os.path.basename(ebuild_path)):
5451                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5452                                                 return 0, myfavorites
5453                                         if mykey not in portdb.xmatch(
5454                                                 "match-visible", portage.dep_getkey(mykey)):
5455                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5456                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5457                                                 print colorize("BAD", "*** page for details.")
5458                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5459                                                         "Continuing...")
5460                                 else:
5461                                         raise portage.exception.PackageNotFound(
5462                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5463                                 db_keys = list(portdb._aux_cache_keys)
5464                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5465                                 pkg = Package(type_name="ebuild", root_config=root_config,
5466                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5467                                 pkgsettings.setcpv(pkg)
5468                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5469                                 self._pkg_cache[pkg] = pkg
5470                                 args.append(PackageArg(arg=x, package=pkg,
5471                                         root_config=root_config))
5472                         elif x.startswith(os.path.sep):
5473                                 if not x.startswith(myroot):
5474                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5475                                                 " $ROOT.\n") % x, noiselevel=-1)
5476                                         return 0, []
5477                                 # Queue these up since it's most efficient to handle
5478                                 # multiple files in a single iter_owners() call.
5479                                 lookup_owners.append(x)
5480                         else:
5481                                 if x in ("system", "world"):
5482                                         x = SETPREFIX + x
5483                                 if x.startswith(SETPREFIX):
5484                                         s = x[len(SETPREFIX):]
5485                                         if s not in sets:
5486                                                 raise portage.exception.PackageSetNotFound(s)
5487                                         if s in self._sets:
5488                                                 continue
5489                                         # Recursively expand sets so that containment tests in
5490                                         # self._get_parent_sets() properly match atoms in nested
5491                                         # sets (like if world contains system).
5492                                         expanded_set = InternalPackageSet(
5493                                                 initial_atoms=getSetAtoms(s))
5494                                         self._sets[s] = expanded_set
5495                                         args.append(SetArg(arg=x, set=expanded_set,
5496                                                 root_config=root_config))
5497                                         continue
5498                                 if not is_valid_package_atom(x):
5499                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5500                                                 noiselevel=-1)
5501                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5502                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5503                                         return (0,[])
5504                                 # Don't expand categories or old-style virtuals here unless
5505                                 # necessary. Expansion of old-style virtuals here causes at
5506                                 # least the following problems:
5507                                 #   1) It's more difficult to determine which set(s) an atom
5508                                 #      came from, if any.
5509                                 #   2) It takes away freedom from the resolver to choose other
5510                                 #      possible expansions when necessary.
5511                                 if "/" in x:
5512                                         args.append(AtomArg(arg=x, atom=x,
5513                                                 root_config=root_config))
5514                                         continue
5515                                 expanded_atoms = self._dep_expand(root_config, x)
5516                                 installed_cp_set = set()
5517                                 for atom in expanded_atoms:
5518                                         atom_cp = portage.dep_getkey(atom)
5519                                         if vardb.cp_list(atom_cp):
5520                                                 installed_cp_set.add(atom_cp)
5521                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5522                                         installed_cp = iter(installed_cp_set).next()
5523                                         expanded_atoms = [atom for atom in expanded_atoms \
5524                                                 if portage.dep_getkey(atom) == installed_cp]
5525
5526                                 if len(expanded_atoms) > 1:
5527                                         print
5528                                         print
5529                                         ambiguous_package_name(x, expanded_atoms, root_config,
5530                                                 self.spinner, self.myopts)
5531                                         return False, myfavorites
5532                                 if expanded_atoms:
5533                                         atom = expanded_atoms[0]
5534                                 else:
5535                                         null_atom = insert_category_into_atom(x, "null")
5536                                         null_cp = portage.dep_getkey(null_atom)
5537                                         cat, atom_pn = portage.catsplit(null_cp)
5538                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5539                                         if virts_p:
5540                                                 # Allow the depgraph to choose which virtual.
5541                                                 atom = insert_category_into_atom(x, "virtual")
5542                                         else:
5543                                                 atom = insert_category_into_atom(x, "null")
5544
5545                                 args.append(AtomArg(arg=x, atom=atom,
5546                                         root_config=root_config))
5547
5548                 if lookup_owners:
5549                         relative_paths = []
5550                         search_for_multiple = False
5551                         if len(lookup_owners) > 1:
5552                                 search_for_multiple = True
5553
5554                         for x in lookup_owners:
5555                                 if not search_for_multiple and os.path.isdir(x):
5556                                         search_for_multiple = True
5557                                 relative_paths.append(x[len(myroot):])
5558
5559                         owners = set()
5560                         for pkg, relative_path in \
5561                                 real_vardb._owners.iter_owners(relative_paths):
5562                                 owners.add(pkg.mycpv)
5563                                 if not search_for_multiple:
5564                                         break
5565
5566                         if not owners:
5567                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5568                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5569                                 return 0, []
5570
5571                         for cpv in owners:
5572                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5573                                 if not slot:
5574                                         # portage now masks packages with missing slot, but it's
5575                                         # possible that one was installed by an older version
5576                                         atom = portage.cpv_getkey(cpv)
5577                                 else:
5578                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5579                                 args.append(AtomArg(arg=atom, atom=atom,
5580                                         root_config=root_config))
5581
5582                 if "--update" in self.myopts:
5583                         # In some cases, the greedy slots behavior can pull in a slot that
5584                         # the user would want to uninstall due to it being blocked by a
5585                         # newer version in a different slot. Therefore, it's necessary to
5586                         # detect and discard any that should be uninstalled. Each time
5587                         # that arguments are updated, package selections are repeated in
5588                         # order to ensure consistency with the current arguments:
5589                         #
5590                         #  1) Initialize args
5591                         #  2) Select packages and generate initial greedy atoms
5592                         #  3) Update args with greedy atoms
5593                         #  4) Select packages and generate greedy atoms again, while
5594                         #     accounting for any blockers between selected packages
5595                         #  5) Update args with revised greedy atoms
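                             # For instance (hypothetical slots): if the user asks for
                             # "dev-lang/python" and slots 2.5 and 2.6 are both
                             # installed, _greedy_slots() may yield extra atoms such
                             # as "dev-lang/python:2.5" and "dev-lang/python:2.6",
                             # each wrapped in an AtomArg below; the second pass with
                             # blocker_lookahead=True then accounts for blockers
                             # between the selected packages, so a slot that a newer
                             # selected version would block is not pulled back in.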
5596
5597                         self._set_args(args)
5598                         greedy_args = []
5599                         for arg in args:
5600                                 greedy_args.append(arg)
5601                                 if not isinstance(arg, AtomArg):
5602                                         continue
5603                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5604                                         greedy_args.append(
5605                                                 AtomArg(arg=arg.arg, atom=atom,
5606                                                         root_config=arg.root_config))
5607
5608                         self._set_args(greedy_args)
5609                         del greedy_args
5610
5611                         # Revise greedy atoms, accounting for any blockers
5612                         # between selected packages.
5613                         revised_greedy_args = []
5614                         for arg in args:
5615                                 revised_greedy_args.append(arg)
5616                                 if not isinstance(arg, AtomArg):
5617                                         continue
5618                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5619                                         blocker_lookahead=True):
5620                                         revised_greedy_args.append(
5621                                                 AtomArg(arg=arg.arg, atom=atom,
5622                                                         root_config=arg.root_config))
5623                         args = revised_greedy_args
5624                         del revised_greedy_args
5625
5626                 self._set_args(args)
5627
5628                 myfavorites = set(myfavorites)
5629                 for arg in args:
5630                         if isinstance(arg, (AtomArg, PackageArg)):
5631                                 myfavorites.add(arg.atom)
5632                         elif isinstance(arg, SetArg):
5633                                 myfavorites.add(arg.arg)
5634                 myfavorites = list(myfavorites)
5635
5636                 pprovideddict = pkgsettings.pprovideddict
5637                 if debug:
5638                         portage.writemsg("\n", noiselevel=-1)
5639                 # Order needs to be preserved since a feature of --nodeps
5640                 # is to allow the user to force a specific merge order.
5641                 args.reverse()
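                     # reverse() followed by pop() consumes the arguments in their
                     # original command-line order without copying the list.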
5642                 while args:
5643                         arg = args.pop()
5644                         for atom in arg.set:
5645                                 self.spinner.update()
5646                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5647                                         root=myroot, parent=arg)
5648                                 atom_cp = portage.dep_getkey(atom)
5649                                 try:
5650                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5651                                         if pprovided and portage.match_from_list(atom, pprovided):
5652                                                 # A provided package has been specified on the command line.
5653                                                 self._pprovided_args.append((arg, atom))
5654                                                 continue
5655                                         if isinstance(arg, PackageArg):
5656                                                 if not self._add_pkg(arg.package, dep) or \
5657                                                         not self._create_graph():
5658                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5659                                                                 "dependencies for %s\n") % arg.arg)
5660                                                         return 0, myfavorites
5661                                                 continue
5662                                         if debug:
5663                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5664                                                         (arg, atom), noiselevel=-1)
5665                                         pkg, existing_node = self._select_package(
5666                                                 myroot, atom, onlydeps=onlydeps)
5667                                         if not pkg:
5668                                                 if not (isinstance(arg, SetArg) and \
5669                                                         arg.name in ("system", "world")):
5670                                                         self._unsatisfied_deps_for_display.append(
5671                                                                 ((myroot, atom), {}))
5672                                                         return 0, myfavorites
5673                                                 self._missing_args.append((arg, atom))
5674                                                 continue
5675                                         if atom_cp != pkg.cp:
5676                                                 # For old-style virtuals, we need to repeat the
5677                                                 # package.provided check against the selected package.
5678                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5679                                                 pprovided = pprovideddict.get(pkg.cp)
5680                                                 if pprovided and \
5681                                                         portage.match_from_list(expanded_atom, pprovided):
5682                                                         # A provided package has been
5683                                                         # specified on the command line.
5684                                                         self._pprovided_args.append((arg, atom))
5685                                                         continue
5686                                         if pkg.installed and "selective" not in self.myparams:
5687                                                 self._unsatisfied_deps_for_display.append(
5688                                                         ((myroot, atom), {}))
5689                                                 # Previous behavior was to bail out in this case, but
5690                                                 # since the dep is satisfied by the installed package,
5691                                                 # it's more friendly to continue building the graph
5692                                                 # and just show a warning message. Therefore, only bail
5693                                                 # out here if the atom is not from either the system or
5694                                                 # world set.
5695                                                 if not (isinstance(arg, SetArg) and \
5696                                                         arg.name in ("system", "world")):
5697                                                         return 0, myfavorites
5698
5699                                         # Add the selected package to the graph as soon as possible
5700                                         # so that later dep_check() calls can use it as feedback
5701                                         # for making more consistent atom selections.
5702                                         if not self._add_pkg(pkg, dep):
5703                                                 if isinstance(arg, SetArg):
5704                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5705                                                                 "dependencies for %s from %s\n") % \
5706                                                                 (atom, arg.arg))
5707                                                 else:
5708                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5709                                                                 "dependencies for %s\n") % atom)
5710                                                 return 0, myfavorites
5711
5712                                 except portage.exception.MissingSignature, e:
5713                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5714                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5715                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5716                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5717                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5718                                         return 0, myfavorites
5719                                 except portage.exception.InvalidSignature, e:
5720                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5721                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5722                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5723                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5724                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5725                                         return 0, myfavorites
5726                                 except SystemExit, e:
5727                                         raise # Needed else can't exit
5728                                 except Exception, e:
5729                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5730                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5731                                         raise
5732
5733                 # Now that the root packages have been added to the graph,
5734                 # process the dependencies.
5735                 if not self._create_graph():
5736                         return 0, myfavorites
5737
5738                 missing = 0
5739                 if "--usepkgonly" in self.myopts:
5740                         for xs in self.digraph.all_nodes():
5741                                 if not isinstance(xs, Package):
5742                                         continue
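                                     # A Package is assumed here to index like its hash key,
                                     # (type_name, root, cpv, operation), so xs[0] is the type,
                                     # xs[2] the cpv and xs[3] the operation.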
5743                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5744                                         if missing == 0:
5745                                                 print
5746                                         missing += 1
5747                                         print "Missing binary for:",xs[2]
5748
5749                 try:
5750                         self.altlist()
5751                 except self._unknown_internal_error:
5752                         return False, myfavorites
5753
5754                 # Return True unless binaries are missing.
5755                 return (not missing, myfavorites)
5756
5757         def _set_args(self, args):
5758                 """
5759                 Create the "args" package set from atoms and packages given as
5760                 arguments. This method can be called multiple times if necessary.
5761                 The package selection cache is automatically invalidated, since
5762                 arguments influence package selections.
5763                 """
5764                 args_set = self._sets["args"]
5765                 args_set.clear()
5766                 for arg in args:
5767                         if not isinstance(arg, (AtomArg, PackageArg)):
5768                                 continue
5769                         atom = arg.atom
5770                         if atom in args_set:
5771                                 continue
5772                         args_set.add(atom)
5773
5774                 self._set_atoms.clear()
5775                 self._set_atoms.update(chain(*self._sets.itervalues()))
5776                 atom_arg_map = self._atom_arg_map
5777                 atom_arg_map.clear()
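                     # Maps (atom, root) to the argument objects that pulled the atom in.
                     # An illustrative entry (hypothetical values) would be
                     #   ("x11-libs/qt:3", "/") -> [<AtomArg x11-libs/qt:3>, <SetArg world>]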
5778                 for arg in args:
5779                         for atom in arg.set:
5780                                 atom_key = (atom, arg.root_config.root)
5781                                 refs = atom_arg_map.get(atom_key)
5782                                 if refs is None:
5783                                         refs = []
5784                                         atom_arg_map[atom_key] = refs
5785                                 if arg not in refs:
5786                                         refs.append(arg)
5787
5788                 # Invalidate the package selection cache, since
5789                 # arguments influence package selections.
5790                 self._highest_pkg_cache.clear()
5791                 for trees in self._filtered_trees.itervalues():
5792                         trees["porttree"].dbapi._clear_cache()
5793
5794         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5795                 """
5796                 Return a list of slot atoms corresponding to installed slots that
5797                 differ from the slot of the highest visible match. When
5798                 blocker_lookahead is True, slot atoms that would trigger a blocker
5799                 conflict are automatically discarded, potentially allowing automatic
5800                 uninstallation of older slots when appropriate.
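                     Example (illustrative, hypothetical packages): if dev-lang/python:2.5
                     is the highest visible match and an instance of dev-lang/python:2.4
                     is installed, the returned list contains "dev-lang/python:2.4" so
                     that the older slot is also kept up to date.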
5801                 """
5802                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5803                 if highest_pkg is None:
5804                         return []
5805                 vardb = root_config.trees["vartree"].dbapi
5806                 slots = set()
5807                 for cpv in vardb.match(atom):
5808                         # don't mix new virtuals with old virtuals
5809                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5810                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5811
5812                 slots.add(highest_pkg.metadata["SLOT"])
5813                 if len(slots) == 1:
5814                         return []
5815                 greedy_pkgs = []
5816                 slots.remove(highest_pkg.metadata["SLOT"])
5817                 while slots:
5818                         slot = slots.pop()
5819                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5820                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5821                         if pkg is not None and \
5822                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5823                                 greedy_pkgs.append(pkg)
5824                 if not greedy_pkgs:
5825                         return []
5826                 if not blocker_lookahead:
5827                         return [pkg.slot_atom for pkg in greedy_pkgs]
5828
5829                 blockers = {}
5830                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5831                 for pkg in greedy_pkgs + [highest_pkg]:
5832                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5833                         try:
5834                                 atoms = self._select_atoms(
5835                                         pkg.root, dep_str, pkg.use.enabled,
5836                                         parent=pkg, strict=True)
5837                         except portage.exception.InvalidDependString:
5838                                 continue
5839                         blocker_atoms = (x for x in atoms if x.blocker)
5840                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5841
5842                 if highest_pkg not in blockers:
5843                         return []
5844
5845                 # filter packages with invalid deps
5846                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5847
5848                 # filter packages that conflict with highest_pkg
5849                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5850                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5851                         blockers[pkg].findAtomForPackage(highest_pkg))]
5852
5853                 if not greedy_pkgs:
5854                         return []
5855
5856                 # If two packages conflict, discard the lower version.
5857                 discard_pkgs = set()
5858                 greedy_pkgs.sort(reverse=True)
5859                 for i in xrange(len(greedy_pkgs) - 1):
5860                         pkg1 = greedy_pkgs[i]
5861                         if pkg1 in discard_pkgs:
5862                                 continue
5863                         for j in xrange(i + 1, len(greedy_pkgs)):
5864                                 pkg2 = greedy_pkgs[j]
5865                                 if pkg2 in discard_pkgs:
5866                                         continue
5867                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5868                                         blockers[pkg2].findAtomForPackage(pkg1):
5869                                         # pkg1 > pkg2
5870                                         discard_pkgs.add(pkg2)
5871
5872                 return [pkg.slot_atom for pkg in greedy_pkgs \
5873                         if pkg not in discard_pkgs]
5874
5875         def _select_atoms_from_graph(self, *pargs, **kwargs):
5876                 """
5877                 Prefer atoms matching packages that have already been
5878                 added to the graph or those that are installed and have
5879                 not been scheduled for replacement.
5880                 """
5881                 kwargs["trees"] = self._graph_trees
5882                 return self._select_atoms_highest_available(*pargs, **kwargs)
5883
5884         def _select_atoms_highest_available(self, root, depstring,
5885                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5886                 """This will raise InvalidDependString if necessary. If trees is
5887                 None then self._filtered_trees is used."""
5888                 pkgsettings = self.pkgsettings[root]
5889                 if trees is None:
5890                         trees = self._filtered_trees
5891                 if not getattr(priority, "buildtime", False):
5892                         # The parent should only be passed to dep_check() for buildtime
5893                         # dependencies since that's the only case when it's appropriate
5894                         # to trigger the circular dependency avoidance code which uses it.
5895                         # It's important not to trigger the same circular dependency
5896                         # avoidance code for runtime dependencies since it's not needed
5897                         # and it can promote an incorrect package choice.
5898                         parent = None
5900                 try:
5901                         if parent is not None:
5902                                 trees[root]["parent"] = parent
5903                         if not strict:
5904                                 portage.dep._dep_check_strict = False
5905                         mycheck = portage.dep_check(depstring, None,
5906                                 pkgsettings, myuse=myuse,
5907                                 myroot=root, trees=trees)
5908                 finally:
5909                         if parent is not None:
5910                                 trees[root].pop("parent")
5911                         portage.dep._dep_check_strict = True
5912                 if not mycheck[0]:
5913                         raise portage.exception.InvalidDependString(mycheck[1])
5914                 selected_atoms = mycheck[1]
5915                 return selected_atoms
5916
5917         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5918                 atom = portage.dep.Atom(atom)
5919                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5920                 atom_without_use = atom
5921                 if atom.use:
5922                         atom_without_use = portage.dep.remove_slot(atom)
5923                         if atom.slot:
5924                                 atom_without_use += ":" + atom.slot
5925                         atom_without_use = portage.dep.Atom(atom_without_use)
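                             # e.g. (illustrative) ">=dev-libs/foo-1.2:2[bar]" becomes
                             # ">=dev-libs/foo-1.2:2", so that candidates can be matched
                             # without regard to USE deps.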
5926                 xinfo = '"%s"' % atom
5927                 if arg:
5928                         xinfo='"%s"' % arg
5929                 # Discard null/ from failed cpv_expand category expansion.
5930                 xinfo = xinfo.replace("null/", "")
5931                 masked_packages = []
5932                 missing_use = []
5933                 missing_licenses = []
5934                 have_eapi_mask = False
5935                 pkgsettings = self.pkgsettings[root]
5936                 implicit_iuse = pkgsettings._get_implicit_iuse()
5937                 root_config = self.roots[root]
5938                 portdb = self.roots[root].trees["porttree"].dbapi
5939                 dbs = self._filtered_trees[root]["dbs"]
5940                 for db, pkg_type, built, installed, db_keys in dbs:
5941                         if installed:
5942                                 continue
5943                         match = db.match
5944                         if hasattr(db, "xmatch"):
5945                                 cpv_list = db.xmatch("match-all", atom_without_use)
5946                         else:
5947                                 cpv_list = db.match(atom_without_use)
5948                         # descending order
5949                         cpv_list.reverse()
5950                         for cpv in cpv_list:
5951                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5952                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5953                                 if metadata is not None:
5954                                         pkg = Package(built=built, cpv=cpv,
5955                                                 installed=installed, metadata=metadata,
5956                                                 root_config=root_config)
5957                                         if pkg.cp != atom.cp:
5958                                                 # A cpv can be returned from dbapi.match() as an
5959                                                 # old-style virtual match even in cases when the
5960                                                 # package does not actually PROVIDE the virtual.
5961                                                 # Filter out any such false matches here.
5962                                                 if not atom_set.findAtomForPackage(pkg):
5963                                                         continue
5964                                         if atom.use and not mreasons:
5965                                                 missing_use.append(pkg)
5966                                                 continue
5967                                 masked_packages.append(
5968                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5969
5970                 missing_use_reasons = []
5971                 missing_iuse_reasons = []
5972                 for pkg in missing_use:
5973                         use = pkg.use.enabled
5974                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5975                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5976                         missing_iuse = []
5977                         for x in atom.use.required:
5978                                 if iuse_re.match(x) is None:
5979                                         missing_iuse.append(x)
5980                         mreasons = []
5981                         if missing_iuse:
5982                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5983                                 missing_iuse_reasons.append((pkg, mreasons))
5984                         else:
5985                                 need_enable = sorted(atom.use.enabled.difference(use))
5986                                 need_disable = sorted(atom.use.disabled.intersection(use))
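                                     # e.g. (illustrative) an atom "www-client/foo[ssl,-X]"
                                     # against a candidate built with USE="X" yields
                                     # need_enable=["ssl"] and need_disable=["X"], shown as
                                     # "Change USE: +ssl -X".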
5987                                 if need_enable or need_disable:
5988                                         changes = []
5989                                         changes.extend(colorize("red", "+" + x) \
5990                                                 for x in need_enable)
5991                                         changes.extend(colorize("blue", "-" + x) \
5992                                                 for x in need_disable)
5993                                         mreasons.append("Change USE: %s" % " ".join(changes))
5994                                         missing_use_reasons.append((pkg, mreasons))
5995
5996                 if missing_iuse_reasons and not missing_use_reasons:
5997                         missing_use_reasons = missing_iuse_reasons
5998                 elif missing_use_reasons:
5999                         # Only show the latest version.
6000                         del missing_use_reasons[1:]
6001
6002                 if missing_use_reasons:
6003                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6004                         print "!!! One of the following packages is required to complete your request:"
6005                         for pkg, mreasons in missing_use_reasons:
6006                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6007
6008                 elif masked_packages:
6009                         print "\n!!! " + \
6010                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6011                                 colorize("INFORM", xinfo) + \
6012                                 colorize("BAD", " have been masked.")
6013                         print "!!! One of the following masked packages is required to complete your request:"
6014                         have_eapi_mask = show_masked_packages(masked_packages)
6015                         if have_eapi_mask:
6016                                 print
6017                                 msg = ("The current version of portage supports " + \
6018                                         "EAPI '%s'. You must upgrade to a newer version" + \
6019                                         " of portage before EAPI masked packages can" + \
6020                                         " be installed.") % portage.const.EAPI
6021                                 from textwrap import wrap
6022                                 for line in wrap(msg, 75):
6023                                         print line
6024                         print
6025                         show_mask_docs()
6026                 else:
6027                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6028
6029                 # Show parent nodes and the argument that pulled them in.
6030                 traversed_nodes = set()
6031                 node = myparent
6032                 msg = []
6033                 while node is not None:
6034                         traversed_nodes.add(node)
6035                         msg.append('(dependency required by "%s" [%s])' % \
6036                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6037                         # When traversing to parents, prefer arguments over packages
6038                         # since arguments are root nodes. Never traverse the same
6039                         # package twice, in order to prevent an infinite loop.
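                             # Illustrative message chain (hypothetical graph):
                             #   (dependency required by "dev-libs/glib-2.16.6" [ebuild])
                             #   (dependency required by "@world" [argument])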
6040                         selected_parent = None
6041                         for parent in self.digraph.parent_nodes(node):
6042                                 if isinstance(parent, DependencyArg):
6043                                         msg.append('(dependency required by "%s" [argument])' % \
6044                                                 (colorize('INFORM', str(parent))))
6045                                         selected_parent = None
6046                                         break
6047                                 if parent not in traversed_nodes:
6048                                         selected_parent = parent
6049                         node = selected_parent
6050                 for line in msg:
6051                         print line
6052
6053                 print
6054
6055         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6056                 cache_key = (root, atom, onlydeps)
6057                 ret = self._highest_pkg_cache.get(cache_key)
6058                 if ret is not None:
6059                         pkg, existing = ret
6060                         if pkg and not existing:
6061                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6062                                 if existing and existing == pkg:
6063                                         # Update the cache to reflect that the
6064                                         # package has been added to the graph.
6065                                         ret = pkg, pkg
6066                                         self._highest_pkg_cache[cache_key] = ret
6067                         return ret
6068                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6069                 self._highest_pkg_cache[cache_key] = ret
6070                 pkg, existing = ret
6071                 if pkg is not None:
6072                         settings = pkg.root_config.settings
6073                         if visible(settings, pkg) and not (pkg.installed and \
6074                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6075                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6076                 return ret
6077
6078         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6079                 root_config = self.roots[root]
6080                 pkgsettings = self.pkgsettings[root]
6081                 dbs = self._filtered_trees[root]["dbs"]
6082                 vardb = self.roots[root].trees["vartree"].dbapi
6083                 portdb = self.roots[root].trees["porttree"].dbapi
6084                 # List of acceptable packages, ordered by type preference.
6085                 matched_packages = []
6086                 highest_version = None
6087                 if not isinstance(atom, portage.dep.Atom):
6088                         atom = portage.dep.Atom(atom)
6089                 atom_cp = atom.cp
6090                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6091                 existing_node = None
6092                 myeb = None
6093                 usepkgonly = "--usepkgonly" in self.myopts
6094                 empty = "empty" in self.myparams
6095                 selective = "selective" in self.myparams
6096                 reinstall = False
6097                 noreplace = "--noreplace" in self.myopts
6098                 # Behavior of the "selective" parameter depends on
6099                 # whether or not a package matches an argument atom.
6100                 # If an installed package provides an old-style
6101                 # virtual that is no longer provided by an available
6102                 # package, the installed package may match an argument
6103                 # atom even though none of the available packages do.
6104                 # Therefore, "selective" logic does not consider
6105                 # whether or not an installed package matches an
6106                 # argument atom. It only considers whether or not
6107                 # available packages match argument atoms, which is
6108                 # represented by the found_available_arg flag.
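                     # Illustrative case (hypothetical): an installed package whose
                     # PROVIDE lists virtual/x11 can still match the argument
                     # "virtual/x11" after every available ebuild has stopped
                     # providing that old-style virtual.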
6109                 found_available_arg = False
6110                 for find_existing_node in True, False:
6111                         if existing_node:
6112                                 break
6113                         for db, pkg_type, built, installed, db_keys in dbs:
6114                                 if existing_node:
6115                                         break
6116                                 if installed and not find_existing_node:
6117                                         want_reinstall = reinstall or empty or \
6118                                                 (found_available_arg and not selective)
6119                                         if want_reinstall and matched_packages:
6120                                                 continue
6121                                 if hasattr(db, "xmatch"):
6122                                         cpv_list = db.xmatch("match-all", atom)
6123                                 else:
6124                                         cpv_list = db.match(atom)
6125
6126                                 # USE=multislot can make an installed package appear as if
6127                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6128                                 # won't do any good as long as USE=multislot is enabled since
6129                                 # the newly built package still won't have the expected slot.
6130                                 # Therefore, assume that such SLOT dependencies are already
6131                                 # satisfied rather than forcing a rebuild.
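                                     # Illustrative case (hypothetical SLOT values): with
                                     # USE=multislot an installed sys-devel/gcc may record
                                     # SLOT="4.1.2" instead of "4.1", so a ":4.1" slot dep
                                     # looks unsatisfied even though a rebuild would not
                                     # change that.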
6132                                 if installed and not cpv_list and atom.slot:
6133                                         for cpv in db.match(atom.cp):
6134                                                 slot_available = False
6135                                                 for other_db, other_type, other_built, \
6136                                                         other_installed, other_keys in dbs:
6137                                                         try:
6138                                                                 if atom.slot == \
6139                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6140                                                                         slot_available = True
6141                                                                         break
6142                                                         except KeyError:
6143                                                                 pass
6144                                                 if not slot_available:
6145                                                         continue
6146                                                 inst_pkg = self._pkg(cpv, "installed",
6147                                                         root_config, installed=installed)
6148                                                 # Remove the slot from the atom and verify that
6149                                                 # the package matches the resulting atom.
6150                                                 atom_without_slot = portage.dep.remove_slot(atom)
6151                                                 if atom.use:
6152                                                         atom_without_slot += str(atom.use)
6153                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6154                                                 if portage.match_from_list(
6155                                                         atom_without_slot, [inst_pkg]):
6156                                                         cpv_list = [inst_pkg.cpv]
6157                                                 break
6158
6159                                 if not cpv_list:
6160                                         continue
6161                                 pkg_status = "merge"
6162                                 if installed or onlydeps:
6163                                         pkg_status = "nomerge"
6164                                 # descending order
6165                                 cpv_list.reverse()
6166                                 for cpv in cpv_list:
6167                                         # Make --noreplace take precedence over --newuse.
6168                                         if not installed and noreplace and \
6169                                                 cpv in vardb.match(atom):
6170                                                 # If the installed version is masked, it may
6171                                                 # be necessary to look at lower versions,
6172                                                 # in case there is a visible downgrade.
6173                                                 continue
6174                                         reinstall_for_flags = None
6175                                         cache_key = (pkg_type, root, cpv, pkg_status)
6176                                         calculated_use = True
6177                                         pkg = self._pkg_cache.get(cache_key)
6178                                         if pkg is None:
6179                                                 calculated_use = False
6180                                                 try:
6181                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6182                                                 except KeyError:
6183                                                         continue
6184                                                 pkg = Package(built=built, cpv=cpv,
6185                                                         installed=installed, metadata=metadata,
6186                                                         onlydeps=onlydeps, root_config=root_config,
6187                                                         type_name=pkg_type)
6188                                                 metadata = pkg.metadata
6189                                                 if not built and ("?" in metadata["LICENSE"] or \
6190                                                         "?" in metadata["PROVIDE"]):
6191                                                         # This is avoided whenever possible because
6192                                                         # it's expensive. It only needs to be done here
6193                                                         # if it has an effect on visibility.
6194                                                         pkgsettings.setcpv(pkg)
6195                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6196                                                         calculated_use = True
6197                                                 self._pkg_cache[pkg] = pkg
6198
6199                                         if not installed or (built and matched_packages):
6200                                                 # Only enforce visibility on installed packages
6201                                                 # if there is at least one other visible package
6202                                                 # available. By filtering installed masked packages
6203                                                 # here, packages that have been masked since they
6204                                                 # were installed can be automatically downgraded
6205                                                 # to an unmasked version.
6206                                                 try:
6207                                                         if not visible(pkgsettings, pkg):
6208                                                                 continue
6209                                                 except portage.exception.InvalidDependString:
6210                                                         if not installed:
6211                                                                 continue
6212
6213                                                 # Enable upgrade or downgrade to a version
6214                                                 # with visible KEYWORDS when the installed
6215                                                 # version is masked by KEYWORDS, but never
6216                                                 # reinstall the same exact version only due
6217                                                 # to a KEYWORDS mask.
6218                                                 if built and matched_packages:
6219
6220                                                         different_version = None
6221                                                         for avail_pkg in matched_packages:
6222                                                                 if not portage.dep.cpvequal(
6223                                                                         pkg.cpv, avail_pkg.cpv):
6224                                                                         different_version = avail_pkg
6225                                                                         break
6226                                                         if different_version is not None:
6227
6228                                                                 if installed and \
6229                                                                         pkgsettings._getMissingKeywords(
6230                                                                         pkg.cpv, pkg.metadata):
6231                                                                         continue
6232
6233                                                                 # If the ebuild no longer exists or its
6234                                                                 # keywords have been dropped, reject built
6235                                                                 # instances (installed or binary).
6236                                                                 # If --usepkgonly is enabled, assume that
6237                                                                 # the ebuild status should be ignored.
6238                                                                 if not usepkgonly:
6239                                                                         try:
6240                                                                                 pkg_eb = self._pkg(
6241                                                                                         pkg.cpv, "ebuild", root_config)
6242                                                                         except portage.exception.PackageNotFound:
6243                                                                                 continue
6244                                                                         else:
6245                                                                                 if not visible(pkgsettings, pkg_eb):
6246                                                                                         continue
6247
6248                                         if not pkg.built and not calculated_use:
6249                                                 # This is avoided whenever possible because
6250                                                 # it's expensive.
6251                                                 pkgsettings.setcpv(pkg)
6252                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6253
6254                                         if pkg.cp != atom.cp:
6255                                                 # A cpv can be returned from dbapi.match() as an
6256                                                 # old-style virtual match even in cases when the
6257                                                 # package does not actually PROVIDE the virtual.
6258                                                 # Filter out any such false matches here.
6259                                                 if not atom_set.findAtomForPackage(pkg):
6260                                                         continue
6261
6262                                         myarg = None
6263                                         if root == self.target_root:
6264                                                 try:
6265                                                         # Ebuild USE must have been calculated prior
6266                                                         # to this point, in case atoms have USE deps.
6267                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6268                                                 except StopIteration:
6269                                                         pass
6270                                                 except portage.exception.InvalidDependString:
6271                                                         if not installed:
6272                                                                 # masked by corruption
6273                                                                 continue
6274                                         if not installed and myarg:
6275                                                 found_available_arg = True
6276
6277                                         if atom.use and not pkg.built:
6278                                                 use = pkg.use.enabled
6279                                                 if atom.use.enabled.difference(use):
6280                                                         continue
6281                                                 if atom.use.disabled.intersection(use):
6282                                                         continue
6283                                         if pkg.cp == atom_cp:
6284                                                 if highest_version is None:
6285                                                         highest_version = pkg
6286                                                 elif pkg > highest_version:
6287                                                         highest_version = pkg
6288                                         # At this point, we've found the highest visible
6289                                         # match from the current repo. Any lower versions
6290                                         # from this repo are ignored, so the loop
6291                                         # will always end with a break statement below
6292                                         # this point.
6293                                         if find_existing_node:
6294                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6295                                                 if not e_pkg:
6296                                                         break
6297                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6298                                                         if highest_version and \
6299                                                                 e_pkg.cp == atom_cp and \
6300                                                                 e_pkg < highest_version and \
6301                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6302                                                                 # There is a higher version available in a
6303                                                                 # different slot, so this existing node is
6304                                                                 # irrelevant.
6305                                                                 pass
6306                                                         else:
6307                                                                 matched_packages.append(e_pkg)
6308                                                                 existing_node = e_pkg
6309                                                 break
6310                                         # Compare built package to current config and
6311                                         # reject the built package if necessary.
6312                                         if built and not installed and \
6313                                                 ("--newuse" in self.myopts or \
6314                                                 "--reinstall" in self.myopts):
6315                                                 iuses = pkg.iuse.all
6316                                                 old_use = pkg.use.enabled
6317                                                 if myeb:
6318                                                         pkgsettings.setcpv(myeb)
6319                                                 else:
6320                                                         pkgsettings.setcpv(pkg)
6321                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6322                                                 forced_flags = set()
6323                                                 forced_flags.update(pkgsettings.useforce)
6324                                                 forced_flags.update(pkgsettings.usemask)
6325                                                 cur_iuse = iuses
6326                                                 if myeb and not usepkgonly:
6327                                                         cur_iuse = myeb.iuse.all
6328                                                 if self._reinstall_for_flags(forced_flags,
6329                                                         old_use, iuses,
6330                                                         now_use, cur_iuse):
6331                                                         break
6332                                         # Compare current config to installed package
6333                                         # and do not reinstall if possible.
6334                                         if not installed and \
6335                                                 ("--newuse" in self.myopts or \
6336                                                 "--reinstall" in self.myopts) and \
6337                                                 cpv in vardb.match(atom):
6338                                                 pkgsettings.setcpv(pkg)
6339                                                 forced_flags = set()
6340                                                 forced_flags.update(pkgsettings.useforce)
6341                                                 forced_flags.update(pkgsettings.usemask)
6342                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6343                                                 old_iuse = set(filter_iuse_defaults(
6344                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6345                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6346                                                 cur_iuse = pkg.iuse.all
6347                                                 reinstall_for_flags = \
6348                                                         self._reinstall_for_flags(
6349                                                         forced_flags, old_use, old_iuse,
6350                                                         cur_use, cur_iuse)
6351                                                 if reinstall_for_flags:
6352                                                         reinstall = True
6353                                         if not built:
6354                                                 myeb = pkg
6355                                         matched_packages.append(pkg)
6356                                         if reinstall_for_flags:
6357                                                 self._reinstall_nodes[pkg] = \
6358                                                         reinstall_for_flags
6359                                         break
6360
6361                 if not matched_packages:
6362                         return None, None
6363
6364                 if "--debug" in self.myopts:
6365                         for pkg in matched_packages:
6366                                 portage.writemsg("%s %s\n" % \
6367                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6368
6369                 # Filter out any old-style virtual matches if they are
6370                 # mixed with new-style virtual matches.
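                     # e.g. (illustrative) an atom "virtual/libc" may match both the
                     # new-style virtual/libc package and an old-style provider such
                     # as sys-libs/glibc; only the new-style match is kept here.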
6371                 cp = portage.dep_getkey(atom)
6372                 if len(matched_packages) > 1 and \
6373                         "virtual" == portage.catsplit(cp)[0]:
6374                         for pkg in matched_packages:
6375                                 if pkg.cp != cp:
6376                                         continue
6377                                 # Got a new-style virtual, so filter
6378                                 # out any old-style virtuals.
6379                                 matched_packages = [pkg for pkg in matched_packages \
6380                                         if pkg.cp == cp]
6381                                 break
6382
6383                 if len(matched_packages) > 1:
6384                         bestmatch = portage.best(
6385                                 [pkg.cpv for pkg in matched_packages])
6386                         matched_packages = [pkg for pkg in matched_packages \
6387                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6388
6389                 # ordered by type preference ("ebuild" type is the last resort)
6390                 return  matched_packages[-1], existing_node
6391
6392         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6393                 """
6394                 Select packages that have already been added to the graph or
6395                 those that are installed and have not been scheduled for
6396                 replacement.
6397                 """
6398                 graph_db = self._graph_trees[root]["porttree"].dbapi
6399                 matches = graph_db.match_pkgs(atom)
6400                 if not matches:
6401                         return None, None
6402                 pkg = matches[-1] # highest match
6403                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6404                 return pkg, in_graph
6405
6406         def _complete_graph(self):
6407                 """
6408                 Add any deep dependencies of required sets (args, system, world) that
6409                 have not been pulled into the graph yet. This ensures that the graph
6410                 is consistent such that initially satisfied deep dependencies are not
6411                 broken in the new graph. Initially unsatisfied dependencies are
6412                 irrelevant since we only want to avoid breaking dependencies that are
6413                 initially satisfied.
6414
6415                 Since this method can consume enough time to disturb users, it is
6416                 currently only enabled by the --complete-graph option.
6417                 """
6418                 if "--buildpkgonly" in self.myopts or \
6419                         "recurse" not in self.myparams:
6420                         return 1
6421
6422                 if "complete" not in self.myparams:
6423                         # Skip this to avoid consuming enough time to disturb users.
6424                         return 1
6425
6426                 # Put the depgraph into a mode that causes it to only
6427                 # select packages that have already been added to the
6428                 # graph or those that are installed and have not been
6429                 # scheduled for replacement. Also, toggle the "deep"
6430                 # parameter so that all dependencies are traversed and
6431                 # accounted for.
6432                 self._select_atoms = self._select_atoms_from_graph
6433                 self._select_package = self._select_pkg_from_graph
6434                 already_deep = "deep" in self.myparams
6435                 if not already_deep:
6436                         self.myparams.add("deep")
6437
6438                 for root in self.roots:
6439                         required_set_names = self._required_set_names.copy()
6440                         if root == self.target_root and \
6441                                 (already_deep or "empty" in self.myparams):
6442                                 required_set_names.difference_update(self._sets)
6443                         if not required_set_names and not self._ignored_deps:
6444                                 continue
6445                         root_config = self.roots[root]
6446                         setconfig = root_config.setconfig
6447                         args = []
6448                         # Reuse existing SetArg instances when available.
6449                         for arg in self.digraph.root_nodes():
6450                                 if not isinstance(arg, SetArg):
6451                                         continue
6452                                 if arg.root_config != root_config:
6453                                         continue
6454                                 if arg.name in required_set_names:
6455                                         args.append(arg)
6456                                         required_set_names.remove(arg.name)
6457                         # Create new SetArg instances only when necessary.
6458                         for s in required_set_names:
6459                                 expanded_set = InternalPackageSet(
6460                                         initial_atoms=setconfig.getSetAtoms(s))
6461                                 atom = SETPREFIX + s
6462                                 args.append(SetArg(arg=atom, set=expanded_set,
6463                                         root_config=root_config))
6464                         vardb = root_config.trees["vartree"].dbapi
6465                         for arg in args:
6466                                 for atom in arg.set:
6467                                         self._dep_stack.append(
6468                                                 Dependency(atom=atom, root=root, parent=arg))
6469                         if self._ignored_deps:
6470                                 self._dep_stack.extend(self._ignored_deps)
6471                                 self._ignored_deps = []
6472                         if not self._create_graph(allow_unsatisfied=True):
6473                                 return 0
6474                         # Check the unsatisfied deps to see if any initially satisfied deps
6475                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6476                         # deps are irrelevant since we only want to avoid breaking deps
6477                         # that are initially satisfied.
6478                         while self._unsatisfied_deps:
6479                                 dep = self._unsatisfied_deps.pop()
6480                                 matches = vardb.match_pkgs(dep.atom)
6481                                 if not matches:
6482                                         self._initially_unsatisfied_deps.append(dep)
6483                                         continue
6484                                 # A scheduled installation broke a deep dependency.
6485                                 # Add the installed package to the graph so that it
6486                                 # will be appropriately reported as a slot collision
6487                                 # (possibly solvable via backtracking).
6488                                 pkg = matches[-1] # highest match
6489                                 if not self._add_pkg(pkg, dep):
6490                                         return 0
6491                                 if not self._create_graph(allow_unsatisfied=True):
6492                                         return 0
6493                 return 1
6494
6495         def _pkg(self, cpv, type_name, root_config, installed=False):
6496                 """
6497                 Get a package instance from the cache, or create a new
6498                 one if necessary. Raises PackageNotFound if aux_get
6499                 fails for some reason (package does not exist or is
6500                 corrupt).
6501                 """
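                     # Cache lookups are keyed by (type_name, root, cpv, operation),
                     # where operation is "nomerge" for installed packages and
                     # "merge" otherwise.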
6502                 operation = "merge"
6503                 if installed:
6504                         operation = "nomerge"
6505                 pkg = self._pkg_cache.get(
6506                         (type_name, root_config.root, cpv, operation))
6507                 if pkg is None:
6508                         tree_type = self.pkg_tree_map[type_name]
6509                         db = root_config.trees[tree_type].dbapi
6510                         db_keys = list(self._trees_orig[root_config.root][
6511                                 tree_type].dbapi._aux_cache_keys)
6512                         try:
6513                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6514                         except KeyError:
6515                                 raise portage.exception.PackageNotFound(cpv)
6516                         pkg = Package(cpv=cpv, metadata=metadata,
6517                                 root_config=root_config, installed=installed)
6518                         if type_name == "ebuild":
6519                                 settings = self.pkgsettings[root_config.root]
6520                                 settings.setcpv(pkg)
6521                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6522                         self._pkg_cache[pkg] = pkg
6523                 return pkg
6524
6525         def validate_blockers(self):
6526                 """Remove any blockers from the digraph that do not match any of the
6527                 packages within the graph.  If necessary, create hard deps to ensure
6528                 correct merge order such that mutually blocking packages are never
6529                 installed simultaneously."""
6530
6531                 if "--buildpkgonly" in self.myopts or \
6532                         "--nodeps" in self.myopts:
6533                         return True
6534
6535                 #if "deep" in self.myparams:
6536                 if True:
6537                         # Pull in blockers from all installed packages that haven't already
6538                         # been pulled into the depgraph.  This is not enabled by default
6539                         # due to the performance penalty that is incurred by all the
6540                         # additional dep_check calls that are required.
6541
6542                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6543                         for myroot in self.trees:
6544                                 vardb = self.trees[myroot]["vartree"].dbapi
6545                                 portdb = self.trees[myroot]["porttree"].dbapi
6546                                 pkgsettings = self.pkgsettings[myroot]
6547                                 final_db = self.mydbapi[myroot]
6548
6549                                 blocker_cache = BlockerCache(myroot, vardb)
6550                                 stale_cache = set(blocker_cache)
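                                             # Entries left in stale_cache after this loop no
                                             # longer correspond to installed packages and are
                                             # pruned from the blocker cache below.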
6551                                 for pkg in vardb:
6552                                         cpv = pkg.cpv
6553                                         stale_cache.discard(cpv)
6554                                         pkg_in_graph = self.digraph.contains(pkg)
6555
6556                                         # Check for masked installed packages. Only warn about
6557                                         # packages that are in the graph in order to avoid warning
6558                                         # about those that will be automatically uninstalled during
6559                                         # the merge process or by --depclean.
6560                                         if pkg in final_db:
6561                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6562                                                         self._masked_installed.add(pkg)
6563
6564                                         blocker_atoms = None
6565                                         blockers = None
6566                                         if pkg_in_graph:
6567                                                 blockers = []
6568                                                 try:
6569                                                         blockers.extend(
6570                                                                 self._blocker_parents.child_nodes(pkg))
6571                                                 except KeyError:
6572                                                         pass
6573                                                 try:
6574                                                         blockers.extend(
6575                                                                 self._irrelevant_blockers.child_nodes(pkg))
6576                                                 except KeyError:
6577                                                         pass
6578                                         if blockers is not None:
6579                                                 blockers = set(str(blocker.atom) \
6580                                                         for blocker in blockers)
6581
6582                                         # If this node has any blockers, create a "nomerge"
6583                                         # node for it so that they can be enforced.
6584                                         self.spinner.update()
6585                                         blocker_data = blocker_cache.get(cpv)
6586                                         if blocker_data is not None and \
6587                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6588                                                 blocker_data = None
6589
6590                                         # If blocker data from the graph is available, use
6591                                         # it to validate the cache and update the cache if
6592                                         # it seems invalid.
6593                                         if blocker_data is not None and \
6594                                                 blockers is not None:
6595                                                 if not blockers.symmetric_difference(
6596                                                         blocker_data.atoms):
6597                                                         continue
6598                                                 blocker_data = None
6599
6600                                         if blocker_data is None and \
6601                                                 blockers is not None:
6602                                                 # Re-use the blockers from the graph.
6603                                                 blocker_atoms = sorted(blockers)
6604                                                 counter = long(pkg.metadata["COUNTER"])
6605                                                 blocker_data = \
6606                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6607                                                 blocker_cache[pkg.cpv] = blocker_data
6608                                                 continue
6609
6610                                         if blocker_data:
6611                                                 blocker_atoms = blocker_data.atoms
6612                                         else:
6613                                                 # Use aux_get() to trigger FakeVartree global
6614                                                 # updates on *DEPEND when appropriate.
6615                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6616                                                 # It is crucial to pass in final_db here in order to
6617                                                 # optimize dep_check calls by eliminating atoms via
6618                                                 # dep_wordreduce and dep_eval calls.
6619                                                 try:
6620                                                         portage.dep._dep_check_strict = False
6621                                                         try:
6622                                                                 success, atoms = portage.dep_check(depstr,
6623                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6624                                                                         trees=self._graph_trees, myroot=myroot)
6625                                                         except Exception, e:
6626                                                                 if isinstance(e, SystemExit):
6627                                                                         raise
6628                                                                 # This is helpful, for example, if a ValueError
6629                                                                 # is thrown from cpv_expand due to multiple
6630                                                                 # matches (this can happen if an atom lacks a
6631                                                                 # category).
6632                                                                 show_invalid_depstring_notice(
6633                                                                         pkg, depstr, str(e))
6634                                                                 del e
6635                                                                 raise
6636                                                 finally:
6637                                                         portage.dep._dep_check_strict = True
6638                                                 if not success:
6639                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6640                                                         if replacement_pkg and \
6641                                                                 replacement_pkg[0].operation == "merge":
6642                                                                 # This package is being replaced anyway, so
6643                                                                 # ignore invalid dependencies so as not to
6644                                                                 # annoy the user too much (otherwise they'd be
6645                                                                 # forced to manually unmerge it first).
6646                                                                 continue
6647                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6648                                                         return False
6649                                                 blocker_atoms = [myatom for myatom in atoms \
6650                                                         if myatom.startswith("!")]
6651                                                 blocker_atoms.sort()
6652                                                 counter = long(pkg.metadata["COUNTER"])
6653                                                 blocker_cache[cpv] = \
6654                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6655                                         if blocker_atoms:
6656                                                 try:
6657                                                         for atom in blocker_atoms:
6658                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6659                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6660                                                                 self._blocker_parents.add(blocker, pkg)
6661                                                 except portage.exception.InvalidAtom, e:
6662                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6663                                                         show_invalid_depstring_notice(
6664                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6665                                                         return False
6666                                 for cpv in stale_cache:
6667                                         del blocker_cache[cpv]
6668                                 blocker_cache.flush()
6669                                 del blocker_cache
6670
6671                 # Discard any "uninstall" tasks scheduled by previous calls
6672                 # to this method, since those tasks may not make sense given
6673                 # the current graph state.
6674                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6675                 if previous_uninstall_tasks:
6676                         self._blocker_uninstalls = digraph()
6677                         self.digraph.difference_update(previous_uninstall_tasks)
6678
6679                 for blocker in self._blocker_parents.leaf_nodes():
6680                         self.spinner.update()
6681                         root_config = self.roots[blocker.root]
6682                         virtuals = root_config.settings.getvirtuals()
6683                         myroot = blocker.root
6684                         initial_db = self.trees[myroot]["vartree"].dbapi
6685                         final_db = self.mydbapi[myroot]
6686
6687                         provider_virtual = False
6688                         if blocker.cp in virtuals and \
6689                                 not self._have_new_virt(blocker.root, blocker.cp):
6690                                 provider_virtual = True
6691
6692                         if provider_virtual:
6693                                 atoms = []
6694                                 for provider_entry in virtuals[blocker.cp]:
6695                                         provider_cp = \
6696                                                 portage.dep_getkey(provider_entry)
6697                                         atoms.append(blocker.atom.replace(
6698                                                 blocker.cp, provider_cp))
6699                         else:
6700                                 atoms = [blocker.atom]
6701
6702                         blocked_initial = []
6703                         for atom in atoms:
6704                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6705
6706                         blocked_final = []
6707                         for atom in atoms:
6708                                 blocked_final.extend(final_db.match_pkgs(atom))
6709
6710                         if not blocked_initial and not blocked_final:
6711                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6712                                 self._blocker_parents.remove(blocker)
6713                                 # Discard any parents that don't have any more blockers.
6714                                 for pkg in parent_pkgs:
6715                                         self._irrelevant_blockers.add(blocker, pkg)
6716                                         if not self._blocker_parents.child_nodes(pkg):
6717                                                 self._blocker_parents.remove(pkg)
6718                                 continue
6719                         for parent in self._blocker_parents.parent_nodes(blocker):
6720                                 unresolved_blocks = False
6721                                 depends_on_order = set()
6722                                 for pkg in blocked_initial:
6723                                         if pkg.slot_atom == parent.slot_atom:
6724                                                 # TODO: Support blocks within slots in cases where it
6725                                                 # might make sense.  For example, a new version might
6726                                                 # require that the old version be uninstalled at build
6727                                                 # time.
6728                                                 continue
6729                                         if parent.installed:
6730                                                 # Two currently installed packages conflict with
6731                                                 # each other. Ignore this case since the damage
6732                                                 # is already done and this would be likely to
6733                                                 # confuse users if displayed like a normal blocker.
6734                                                 continue
6735
6736                                         self._blocked_pkgs.add(pkg, blocker)
6737
6738                                         if parent.operation == "merge":
6739                                                 # Maybe the blocked package can be replaced or simply
6740                                                 # unmerged to resolve this block.
6741                                                 depends_on_order.add((pkg, parent))
6742                                                 continue
6743                                         # None of the above blocker resolution techniques apply,
6744                                         # so apparently this one is unresolvable.
6745                                         unresolved_blocks = True
6746                                 for pkg in blocked_final:
6747                                         if pkg.slot_atom == parent.slot_atom:
6748                                                 # TODO: Support blocks within slots.
6749                                                 continue
6750                                         if parent.operation == "nomerge" and \
6751                                                 pkg.operation == "nomerge":
6752                                                 # This blocker will be handled the next time that a
6753                                                 # merge of either package is triggered.
6754                                                 continue
6755
6756                                         self._blocked_pkgs.add(pkg, blocker)
6757
6758                                         # Maybe the blocking package can be
6759                                         # unmerged to resolve this block.
6760                                         if parent.operation == "merge" and pkg.installed:
6761                                                 depends_on_order.add((pkg, parent))
6762                                                 continue
6763                                         elif parent.operation == "nomerge":
6764                                                 depends_on_order.add((parent, pkg))
6765                                                 continue
6766                                         # None of the above blocker resolution techniques apply,
6767                                         # so apparently this one is unresolvable.
6768                                         unresolved_blocks = True
6769
6770                                 # Make sure we don't unmerge any packages that have been pulled
6771                                 # into the graph.
6772                                 if not unresolved_blocks and depends_on_order:
6773                                         for inst_pkg, inst_task in depends_on_order:
6774                                                 if self.digraph.contains(inst_pkg) and \
6775                                                         self.digraph.parent_nodes(inst_pkg):
6776                                                         unresolved_blocks = True
6777                                                         break
6778
6779                                 if not unresolved_blocks and depends_on_order:
6780                                         for inst_pkg, inst_task in depends_on_order:
6781                                                 uninst_task = Package(built=inst_pkg.built,
6782                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6783                                                         metadata=inst_pkg.metadata,
6784                                                         operation="uninstall",
6785                                                         root_config=inst_pkg.root_config,
6786                                                         type_name=inst_pkg.type_name)
6787                                                 self._pkg_cache[uninst_task] = uninst_task
6788                                                 # Enforce correct merge order with a hard dep.
6789                                                 self.digraph.addnode(uninst_task, inst_task,
6790                                                         priority=BlockerDepPriority.instance)
6791                                                 # Count references to this blocker so that it can be
6792                                                 # invalidated after nodes referencing it have been
6793                                                 # merged.
6794                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6795                                 if not unresolved_blocks and not depends_on_order:
6796                                         self._irrelevant_blockers.add(blocker, parent)
6797                                         self._blocker_parents.remove_edge(blocker, parent)
6798                                         if not self._blocker_parents.parent_nodes(blocker):
6799                                                 self._blocker_parents.remove(blocker)
6800                                         if not self._blocker_parents.child_nodes(parent):
6801                                                 self._blocker_parents.remove(parent)
6802                                 if unresolved_blocks:
6803                                         self._unsolvable_blockers.add(blocker, parent)
6804
6805                 return True
6806
6807         def _accept_blocker_conflicts(self):
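                     # Unresolved blocker conflicts are acceptable when nothing will
                     # actually be merged to the live filesystem (fetch-only and
                     # buildpkg-only modes) or when dependencies are ignored
                     # entirely (--nodeps).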
6808                 acceptable = False
6809                 for x in ("--buildpkgonly", "--fetchonly",
6810                         "--fetch-all-uri", "--nodeps"):
6811                         if x in self.myopts:
6812                                 acceptable = True
6813                                 break
6814                 return acceptable
6815
6816         def _merge_order_bias(self, mygraph):
6817                 """
6818                 For optimal leaf node selection, promote deep system runtime deps and
6819                 order nodes from highest to lowest overall reference count.
6820                 """
6821
6822                 node_info = {}
6823                 for node in mygraph.order:
6824                         node_info[node] = len(mygraph.parent_nodes(node))
6825                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6826
6827                 def cmp_merge_preference(node1, node2):
6828
6829                         if node1.operation == 'uninstall':
6830                                 if node2.operation == 'uninstall':
6831                                         return 0
6832                                 return 1
6833
6834                         if node2.operation == 'uninstall':
6835                                 if node1.operation == 'uninstall':
6836                                         return 0
6837                                 return -1
6838
6839                         node1_sys = node1 in deep_system_deps
6840                         node2_sys = node2 in deep_system_deps
6841                         if node1_sys != node2_sys:
6842                                 if node1_sys:
6843                                         return -1
6844                                 return 1
6845
6846                         return node_info[node2] - node_info[node1]
6847
6848                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
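                     # Rough illustration of the resulting order (node names here
                     # are hypothetical, not part of this module): deep system
                     # runtime deps sort first, uninstall tasks sort last, and the
                     # rest sort by descending parent (reference) count, e.g.:
                     #
                     #   mygraph.order = [leaf_pkg, glibc_pkg, uninstall_task]
                     #   mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
                     #   # -> [glibc_pkg, leaf_pkg, uninstall_task]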
6849
6850         def altlist(self, reversed=False):
6851
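                     # The serialized merge list is computed lazily and cached.
                     # Catching _serialize_tasks_retry restarts the
                     # resolve/serialize cycle until serialization succeeds.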
6852                 while self._serialized_tasks_cache is None:
6853                         self._resolve_conflicts()
6854                         try:
6855                                 self._serialized_tasks_cache, self._scheduler_graph = \
6856                                         self._serialize_tasks()
6857                         except self._serialize_tasks_retry:
6858                                 pass
6859
6860                 retlist = self._serialized_tasks_cache[:]
6861                 if reversed:
6862                         retlist.reverse()
6863                 return retlist
6864
6865         def schedulerGraph(self):
6866                 """
6867                 The scheduler graph is identical to the normal one except that
6868                 uninstall edges are reversed in specific cases that require
6869                 conflicting packages to be temporarily installed simultaneously.
6870                 This is intended for use by the Scheduler in its parallelization
6871                 logic. It ensures that temporary simultaneous installation of
6872                 conflicting packages is avoided when appropriate (especially for
6873                 !!atom blockers), but allowed in specific cases that require it.
6874
6875                 Note that this method calls break_refs() which alters the state of
6876                 internal Package instances such that this depgraph instance should
6877                 not be used to perform any more calculations.
6878                 """
6879                 if self._scheduler_graph is None:
6880                         self.altlist()
6881                 self.break_refs(self._scheduler_graph.order)
6882                 return self._scheduler_graph
6883
6884         def break_refs(self, nodes):
6885                 """
6886                 Take a mergelist like that returned from self.altlist() and
6887                 break any references that lead back to the depgraph. This is
6888                 useful if you want to hold references to packages without
6889                 also holding the depgraph on the heap.
6890                 """
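                     # Hypothetical usage sketch (names illustrative only):
                     #
                     #   mergelist = mydepgraph.altlist()
                     #   mydepgraph.break_refs(mergelist)
                     #   # Package instances in mergelist no longer pin the
                     #   # depgraph or its FakeVartree on the heap.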
6891                 for node in nodes:
6892                         if hasattr(node, "root_config"):
6893                                 # The FakeVartree references the _package_cache which
6894                                 # references the depgraph. So that Package instances don't
6895                                 # hold the depgraph and FakeVartree on the heap, replace
6896                                 # the RootConfig that references the FakeVartree with the
6897                                 # original RootConfig instance which references the actual
6898                                 # vartree.
6899                                 node.root_config = \
6900                                         self._trees_orig[node.root_config.root]["root_config"]
6901
6902         def _resolve_conflicts(self):
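                     # Graph completion and blocker validation must both succeed
                     # before slot conflicts are processed; failure at either step
                     # is raised as an internal error rather than reported as a
                     # normal resolution failure.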
6903                 if not self._complete_graph():
6904                         raise self._unknown_internal_error()
6905
6906                 if not self.validate_blockers():
6907                         raise self._unknown_internal_error()
6908
6909                 if self._slot_collision_info:
6910                         self._process_slot_conflicts()
6911
6912         def _serialize_tasks(self):
6913
6914                 if "--debug" in self.myopts:
6915                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6916                         self.digraph.debug_print()
6917                         writemsg("\n", noiselevel=-1)
6918
6919                 scheduler_graph = self.digraph.copy()
6920                 mygraph = self.digraph.copy()
6921                 # Prune "nomerge" root nodes if nothing depends on them, since
6922                 # otherwise they slow down merge order calculation. Don't remove
6923                 # non-root nodes since they help optimize merge order in some cases
6924                 # such as revdep-rebuild.
6925                 removed_nodes = set()
6926                 while True:
6927                         for node in mygraph.root_nodes():
6928                                 if not isinstance(node, Package) or \
6929                                         node.installed or node.onlydeps:
6930                                         removed_nodes.add(node)
6931                         if removed_nodes:
6932                                 self.spinner.update()
6933                                 mygraph.difference_update(removed_nodes)
6934                         if not removed_nodes:
6935                                 break
6936                         removed_nodes.clear()
6937                 self._merge_order_bias(mygraph)
6938                 def cmp_circular_bias(n1, n2):
6939                         """
6940                         RDEPEND is stronger than PDEPEND and this function
6941                         measures such a strength bias within a circular
6942                         dependency relationship.
6943                         """
6944                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6945                                 ignore_priority=priority_range.ignore_medium_soft)
6946                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6947                                 ignore_priority=priority_range.ignore_medium_soft)
6948                         if n1_n2_medium == n2_n1_medium:
6949                                 return 0
6950                         elif n1_n2_medium:
6951                                 return 1
6952                         return -1
6953                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6954                 retlist = []
6955                 # Contains uninstall tasks that have been scheduled to
6956                 # occur after overlapping blockers have been installed.
6957                 scheduled_uninstalls = set()
6958                 # Contains any Uninstall tasks that have been ignored
6959                 # in order to avoid the circular deps code path. These
6960                 # correspond to blocker conflicts that could not be
6961                 # resolved.
6962                 ignored_uninstall_tasks = set()
6963                 have_uninstall_task = False
6964                 complete = "complete" in self.myparams
6965                 asap_nodes = []
6966
6967                 def get_nodes(**kwargs):
6968                         """
6969                         Returns leaf nodes excluding Uninstall instances
6970                         since those should be executed as late as possible.
6971                         """
6972                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6973                                 if isinstance(node, Package) and \
6974                                         (node.operation != "uninstall" or \
6975                                         node in scheduled_uninstalls)]
6976
6977                 # sys-apps/portage needs special treatment if ROOT="/"
6978                 running_root = self._running_root.root
6979                 from portage.const import PORTAGE_PACKAGE_ATOM
6980                 runtime_deps = InternalPackageSet(
6981                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6982                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6983                         PORTAGE_PACKAGE_ATOM)
6984                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6985                         PORTAGE_PACKAGE_ATOM)
6986
6987                 if running_portage:
6988                         running_portage = running_portage[0]
6989                 else:
6990                         running_portage = None
6991
6992                 if replacement_portage:
6993                         replacement_portage = replacement_portage[0]
6994                 else:
6995                         replacement_portage = None
6996
6997                 if replacement_portage == running_portage:
6998                         replacement_portage = None
6999
7000                 if replacement_portage is not None:
7001                         # update from running_portage to replacement_portage asap
7002                         asap_nodes.append(replacement_portage)
7003
7004                 if running_portage is not None:
7005                         try:
7006                                 portage_rdepend = self._select_atoms_highest_available(
7007                                         running_root, running_portage.metadata["RDEPEND"],
7008                                         myuse=running_portage.use.enabled,
7009                                         parent=running_portage, strict=False)
7010                         except portage.exception.InvalidDependString, e:
7011                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7012                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7013                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7014                                 del e
7015                                 portage_rdepend = []
7016                         runtime_deps.update(atom for atom in portage_rdepend \
7017                                 if not atom.startswith("!"))
7018
7019                 def gather_deps(ignore_priority, mergeable_nodes,
7020                         selected_nodes, node):
7021                         """
7022                         Recursively gather a group of nodes that RDEPEND on
7023                         each other. This ensures that they are merged as a group
7024                         and get their RDEPENDs satisfied as soon as possible.
7025                         """
7026                         if node in selected_nodes:
7027                                 return True
7028                         if node not in mergeable_nodes:
7029                                 return False
7030                         if node == replacement_portage and \
7031                                 mygraph.child_nodes(node,
7032                                 ignore_priority=priority_range.ignore_medium_soft):
7033                                 # Make sure that portage always has all of its
7034                                 # RDEPENDs installed first.
7035                                 return False
7036                         selected_nodes.add(node)
7037                         for child in mygraph.child_nodes(node,
7038                                 ignore_priority=ignore_priority):
7039                                 if not gather_deps(ignore_priority,
7040                                         mergeable_nodes, selected_nodes, child):
7041                                         return False
7042                         return True
7043
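                     # Variants of the priority-range filters that additionally
                     # treat blocker-enforced uninstall edges (BlockerDepPriority)
                     # as ignorable when collecting candidate nodes.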
7044                 def ignore_uninst_or_med(priority):
7045                         if priority is BlockerDepPriority.instance:
7046                                 return True
7047                         return priority_range.ignore_medium(priority)
7048
7049                 def ignore_uninst_or_med_soft(priority):
7050                         if priority is BlockerDepPriority.instance:
7051                                 return True
7052                         return priority_range.ignore_medium_soft(priority)
7053
7054                 tree_mode = "--tree" in self.myopts
7055                 # Tracks whether or not the current iteration should prefer asap_nodes
7056                 # if available.  This is set to False when the previous iteration
7057                 # failed to select any nodes.  It is reset whenever nodes are
7058                 # successfully selected.
7059                 prefer_asap = True
7060
7061                 # Controls whether or not the current iteration should drop edges that
7062                 # are "satisfied" by installed packages, in order to solve circular
7063                 # dependencies. The deep runtime dependencies of installed packages are
7064                 # not checked in this case (bug #199856), so it must be avoided
7065                 # whenever possible.
7066                 drop_satisfied = False
7067
7068                 # State of variables for successive iterations that loosen the
7069                 # criteria for node selection.
7070                 #
7071                 # iteration   prefer_asap   drop_satisfied
7072                 # 1           True          False
7073                 # 2           False         False
7074                 # 3           False         True
7075                 #
7076                 # If no nodes are selected on the last iteration, it is due to
7077                 # unresolved blockers or circular dependencies.
7078
7079                 while not mygraph.empty():
7080                         self.spinner.update()
7081                         selected_nodes = None
7082                         ignore_priority = None
7083                         if drop_satisfied or (prefer_asap and asap_nodes):
7084                                 priority_range = DepPrioritySatisfiedRange
7085                         else:
7086                                 priority_range = DepPriorityNormalRange
7087                         if prefer_asap and asap_nodes:
7088                                 # ASAP nodes are merged before their soft deps. Go ahead and
7089                                 # select root nodes here if necessary, since it's typical for
7090                                 # the parent to have been removed from the graph already.
7091                                 asap_nodes = [node for node in asap_nodes \
7092                                         if mygraph.contains(node)]
7093                                 for node in asap_nodes:
7094                                         if not mygraph.child_nodes(node,
7095                                                 ignore_priority=priority_range.ignore_soft):
7096                                                 selected_nodes = [node]
7097                                                 asap_nodes.remove(node)
7098                                                 break
7099                         if not selected_nodes and \
7100                                 not (prefer_asap and asap_nodes):
7101                                 for i in xrange(priority_range.NONE,
7102                                         priority_range.MEDIUM_SOFT + 1):
7103                                         ignore_priority = priority_range.ignore_priority[i]
7104                                         nodes = get_nodes(ignore_priority=ignore_priority)
7105                                         if nodes:
7106                                                 # If there is a mix of uninstall nodes with other
7107                                                 # types, save the uninstall nodes for later since
7108                                                 # sometimes a merge node will render an uninstall
7109                                                 # node unnecessary (due to occupying the same slot),
7110                                                 # and we want to avoid executing a separate uninstall
7111                                                 # task in that case.
7112                                                 if len(nodes) > 1:
7113                                                         good_uninstalls = []
7114                                                         with_some_uninstalls_excluded = []
7115                                                         for node in nodes:
7116                                                                 if node.operation == "uninstall":
7117                                                                         slot_node = self.mydbapi[node.root
7118                                                                                 ].match_pkgs(node.slot_atom)
7119                                                                         if slot_node and \
7120                                                                                 slot_node[0].operation == "merge":
7121                                                                                 continue
7122                                                                         good_uninstalls.append(node)
7123                                                                 with_some_uninstalls_excluded.append(node)
7124                                                         if good_uninstalls:
7125                                                                 nodes = good_uninstalls
7126                                                         elif with_some_uninstalls_excluded:
7127                                                                 nodes = with_some_uninstalls_excluded
7128                                                         else:
7129                                                                 nodes = nodes
7130
7131                                                 if ignore_priority is None and not tree_mode:
7132                                                         # Greedily pop all of these nodes since no
7133                                                         # relationship has been ignored. This optimization
7134                                                         # destroys --tree output, so it's disabled in tree
7135                                                         # mode.
7136                                                         selected_nodes = nodes
7137                                                 else:
7138                                                         # For optimal merge order:
7139                                                         #  * Only pop one node.
7140                                                         #  * Removing a root node (node without a parent)
7141                                                         #    will not produce a leaf node, so avoid it.
7142                                                         #  * It's normal for a selected uninstall to be a
7143                                                         #    root node, so don't check them for parents.
7144                                                         for node in nodes:
7145                                                                 if node.operation == "uninstall" or \
7146                                                                         mygraph.parent_nodes(node):
7147                                                                         selected_nodes = [node]
7148                                                                         break
7149
7150                                                 if selected_nodes:
7151                                                         break
7152
7153                         if not selected_nodes:
7154                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7155                                 if nodes:
7156                                         mergeable_nodes = set(nodes)
7157                                         if prefer_asap and asap_nodes:
7158                                                 nodes = asap_nodes
7159                                         for i in xrange(priority_range.SOFT,
7160                                                 priority_range.MEDIUM_SOFT + 1):
7161                                                 ignore_priority = priority_range.ignore_priority[i]
7162                                                 for node in nodes:
7163                                                         if not mygraph.parent_nodes(node):
7164                                                                 continue
7165                                                         selected_nodes = set()
7166                                                         if gather_deps(ignore_priority,
7167                                                                 mergeable_nodes, selected_nodes, node):
7168                                                                 break
7169                                                         else:
7170                                                                 selected_nodes = None
7171                                                 if selected_nodes:
7172                                                         break
7173
7174                                         if prefer_asap and asap_nodes and not selected_nodes:
7175                                                 # We failed to find any asap nodes to merge, so ignore
7176                                                 # them for the next iteration.
7177                                                 prefer_asap = False
7178                                                 continue
7179
7180                         if selected_nodes and ignore_priority is not None:
7181                                 # Try to merge ignored medium_soft deps as soon as possible
7182                                 # if they're not satisfied by installed packages.
7183                                 for node in selected_nodes:
7184                                         children = set(mygraph.child_nodes(node))
7185                                         soft = children.difference(
7186                                                 mygraph.child_nodes(node,
7187                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7188                                         medium_soft = children.difference(
7189                                                 mygraph.child_nodes(node,
7190                                                         ignore_priority = \
7191                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7192                                         medium_soft.difference_update(soft)
7193                                         for child in medium_soft:
7194                                                 if child in selected_nodes:
7195                                                         continue
7196                                                 if child in asap_nodes:
7197                                                         continue
7198                                                 asap_nodes.append(child)
7199
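                             # When a group of mutually dependent nodes was selected,
                             # bias the order within the group by dependency strength
                             # (RDEPEND over PDEPEND); see cmp_circular_bias() above.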
7200                         if selected_nodes and len(selected_nodes) > 1:
7201                                 if not isinstance(selected_nodes, list):
7202                                         selected_nodes = list(selected_nodes)
7203                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7204
7205                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7206                                 # An Uninstall task needs to be executed in order to
7207                                 # avoid conflict if possible.
7208
7209                                 if drop_satisfied:
7210                                         priority_range = DepPrioritySatisfiedRange
7211                                 else:
7212                                         priority_range = DepPriorityNormalRange
7213
7214                                 mergeable_nodes = get_nodes(
7215                                         ignore_priority=ignore_uninst_or_med)
7216
7217                                 min_parent_deps = None
7218                                 uninst_task = None
7219                                 for task in myblocker_uninstalls.leaf_nodes():
7220                                         # Do some sanity checks so that system or world packages
7221                                         # don't get uninstalled inappropriately here (only really
7222                                         # necessary when --complete-graph has not been enabled).
7223
7224                                         if task in ignored_uninstall_tasks:
7225                                                 continue
7226
7227                                         if task in scheduled_uninstalls:
7228                                                 # It's been scheduled but it hasn't
7229                                                 # been executed yet due to dependence
7230                                                 # on installation of blocking packages.
7231                                                 continue
7232
7233                                         root_config = self.roots[task.root]
7234                                         inst_pkg = self._pkg_cache[
7235                                                 ("installed", task.root, task.cpv, "nomerge")]
7236
7237                                         if self.digraph.contains(inst_pkg):
7238                                                 continue
7239
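                                             # Classify the blockers that want this package
                                             # removed: "!!" blockers explicitly forbid temporary
                                             # overlap, while EAPI 0 and 1 blockers predate that
                                             # syntax and are handled heuristically below.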
7240                                         forbid_overlap = False
7241                                         heuristic_overlap = False
7242                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7243                                                 if blocker.eapi in ("0", "1"):
7244                                                         heuristic_overlap = True
7245                                                 elif blocker.atom.blocker.overlap.forbid:
7246                                                         forbid_overlap = True
7247                                                         break
7248                                         if forbid_overlap and running_root == task.root:
7249                                                 continue
7250
7251                                         if heuristic_overlap and running_root == task.root:
7252                                                 # Never uninstall sys-apps/portage or its essential
7253                                                 # dependencies, except through replacement.
7254                                                 try:
7255                                                         runtime_dep_atoms = \
7256                                                                 list(runtime_deps.iterAtomsForPackage(task))
7257                                                 except portage.exception.InvalidDependString, e:
7258                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7259                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7260                                                                 (task.root, task.cpv, e), noiselevel=-1)
7261                                                         del e
7262                                                         continue
7263
7264                                                 # Don't uninstall a runtime dep if it appears
7265                                                 # to be the only suitable one installed.
7266                                                 skip = False
7267                                                 vardb = root_config.trees["vartree"].dbapi
7268                                                 for atom in runtime_dep_atoms:
7269                                                         other_version = None
7270                                                         for pkg in vardb.match_pkgs(atom):
7271                                                                 if pkg.cpv == task.cpv and \
7272                                                                         pkg.metadata["COUNTER"] == \
7273                                                                         task.metadata["COUNTER"]:
7274                                                                         continue
7275                                                                 other_version = pkg
7276                                                                 break
7277                                                         if other_version is None:
7278                                                                 skip = True
7279                                                                 break
7280                                                 if skip:
7281                                                         continue
7282
7283                                                 # For packages in the system set, don't take
7284                                                 # any chances. If the conflict can't be resolved
7285                                                 # by a normal replacement operation then abort.
7286                                                 skip = False
7287                                                 try:
7288                                                         for atom in root_config.sets[
7289                                                                 "system"].iterAtomsForPackage(task):
7290                                                                 skip = True
7291                                                                 break
7292                                                 except portage.exception.InvalidDependString, e:
7293                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7294                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7295                                                                 (task.root, task.cpv, e), noiselevel=-1)
7296                                                         del e
7297                                                         skip = True
7298                                                 if skip:
7299                                                         continue
7300
7301                                         # Note that the world check isn't always
7302                                         # necessary since self._complete_graph() will
7303                                         # add all packages from the system and world sets to the
7304                                         # graph. This just allows unresolved conflicts to be
7305                                         # detected as early as possible, which makes it possible
7306                                         # to avoid calling self._complete_graph() when it is
7307                                         # unnecessary due to blockers triggering an abort.
7308                                         if not complete:
7309                                                 # For packages in the world set, go ahead and uninstall
7310                                                 # when necessary, as long as the atom will be satisfied
7311                                                 # in the final state.
7312                                                 graph_db = self.mydbapi[task.root]
7313                                                 skip = False
7314                                                 try:
7315                                                         for atom in root_config.sets[
7316                                                                 "world"].iterAtomsForPackage(task):
7317                                                                 satisfied = False
7318                                                                 for pkg in graph_db.match_pkgs(atom):
7319                                                                         if pkg == inst_pkg:
7320                                                                                 continue
7321                                                                         satisfied = True
7322                                                                         break
7323                                                                 if not satisfied:
7324                                                                         skip = True
7325                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7326                                                                         break
7327                                                 except portage.exception.InvalidDependString, e:
7328                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7329                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7330                                                                 (task.root, task.cpv, e), noiselevel=-1)
7331                                                         del e
7332                                                         skip = True
7333                                                 if skip:
7334                                                         continue
7335
7336                                         # Check the deps of parent nodes to ensure that
7337                                         # the chosen task produces a leaf node. Maybe
7338                                         # this can be optimized some more to make the
7339                                         # best possible choice, but the current algorithm
7340                                         # is simple and should be near optimal for most
7341                                         # common cases.
7342                                         mergeable_parent = False
7343                                         parent_deps = set()
7344                                         for parent in mygraph.parent_nodes(task):
7345                                                 parent_deps.update(mygraph.child_nodes(parent,
7346                                                         ignore_priority=priority_range.ignore_medium_soft))
7347                                                 if parent in mergeable_nodes and \
7348                                                         gather_deps(ignore_uninst_or_med_soft,
7349                                                         mergeable_nodes, set(), parent):
7350                                                         mergeable_parent = True
7351
7352                                         if not mergeable_parent:
7353                                                 continue
7354
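                                             # Among the remaining candidates, track the task whose parents
                                             # have the fewest other outstanding deps, since that choice
                                             # comes closest to producing a leaf node.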
7355                                         parent_deps.remove(task)
7356                                         if min_parent_deps is None or \
7357                                                 len(parent_deps) < min_parent_deps:
7358                                                 min_parent_deps = len(parent_deps)
7359                                                 uninst_task = task
7360
7361                                 if uninst_task is not None:
7362                                         # The uninstall is performed only after blocking
7363                                         # packages have been merged on top of it. File
7364                                         # collisions between blocking packages are detected
7365                                         # and removed from the list of files to be uninstalled.
7366                                         scheduled_uninstalls.add(uninst_task)
7367                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7368
7369                                         # Reverse the parent -> uninstall edges since we want
7370                                         # to do the uninstall after blocking packages have
7371                                         # been merged on top of it.
7372                                         mygraph.remove(uninst_task)
7373                                         for blocked_pkg in parent_nodes:
7374                                                 mygraph.add(blocked_pkg, uninst_task,
7375                                                         priority=BlockerDepPriority.instance)
7376                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7377                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7378                                                         priority=BlockerDepPriority.instance)
7379
7380                                         # Reset the state variables for leaf node selection and
7381                                         # continue trying to select leaf nodes.
7382                                         prefer_asap = True
7383                                         drop_satisfied = False
7384                                         continue
7385
7386                         if not selected_nodes:
7387                                 # Only select root nodes as a last resort. This case should
7388                                 # only trigger when the graph is nearly empty and the only
7389                                 # remaining nodes are isolated (no parents or children). Since
7390                                 # the nodes must be isolated, ignore_priority is not needed.
7391                                 selected_nodes = get_nodes()
7392
7393                         if not selected_nodes and not drop_satisfied:
7394                                 drop_satisfied = True
7395                                 continue
7396
7397                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7398                                 # If possible, drop an uninstall task here in order to avoid
7399                                 # the circular deps code path. The corresponding blocker will
7400                                 # still be counted as an unresolved conflict.
7401                                 uninst_task = None
7402                                 for node in myblocker_uninstalls.leaf_nodes():
7403                                         try:
7404                                                 mygraph.remove(node)
7405                                         except KeyError:
7406                                                 pass
7407                                         else:
7408                                                 uninst_task = node
7409                                                 ignored_uninstall_tasks.add(node)
7410                                                 break
7411
7412                                 if uninst_task is not None:
7413                                         # Reset the state variables for leaf node selection and
7414                                         # continue trying to select leaf nodes.
7415                                         prefer_asap = True
7416                                         drop_satisfied = False
7417                                         continue
7418
7419                         if not selected_nodes:
7420                                 self._circular_deps_for_display = mygraph
7421                                 raise self._unknown_internal_error()
7422
7423                         # At this point, we've succeeded in selecting one or more nodes, so
7424                         # reset state variables for leaf node selection.
7425                         prefer_asap = True
7426                         drop_satisfied = False
7427
7428                         mygraph.difference_update(selected_nodes)
7429
7430                         for node in selected_nodes:
7431                                 if isinstance(node, Package) and \
7432                                         node.operation == "nomerge":
7433                                         continue
7434
7435                                 # Handle interactions between blockers
7436                                 # and uninstallation tasks.
7437                                 solved_blockers = set()
7438                                 uninst_task = None
7439                                 if isinstance(node, Package) and \
7440                                         "uninstall" == node.operation:
7441                                         have_uninstall_task = True
7442                                         uninst_task = node
7443                                 else:
7444                                         vardb = self.trees[node.root]["vartree"].dbapi
7445                                         previous_cpv = vardb.match(node.slot_atom)
7446                                         if previous_cpv:
7447                                                 # The package will be replaced by this one, so remove
7448                                                 # the corresponding Uninstall task if necessary.
7449                                                 previous_cpv = previous_cpv[0]
7450                                                 uninst_task = \
7451                                                         ("installed", node.root, previous_cpv, "uninstall")
7452                                                 try:
7453                                                         mygraph.remove(uninst_task)
7454                                                 except KeyError:
7455                                                         pass
7456
7457                                 if uninst_task is not None and \
7458                                         uninst_task not in ignored_uninstall_tasks and \
7459                                         myblocker_uninstalls.contains(uninst_task):
7460                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7461                                         myblocker_uninstalls.remove(uninst_task)
7462                                         # Discard any blockers that this Uninstall solves.
7463                                         for blocker in blocker_nodes:
7464                                                 if not myblocker_uninstalls.child_nodes(blocker):
7465                                                         myblocker_uninstalls.remove(blocker)
7466                                                         solved_blockers.add(blocker)
7467
7468                                 retlist.append(node)
7469
7470                                 if (isinstance(node, Package) and \
7471                                         "uninstall" == node.operation) or \
7472                                         (uninst_task is not None and \
7473                                         uninst_task in scheduled_uninstalls):
7474                                         # Include satisfied blockers in the merge list
7475                                         # since the user might be interested and also
7476                                         # it serves as an indicator that blocking packages
7477                                         # will be temporarily installed simultaneously.
7478                                         for blocker in solved_blockers:
7479                                                 retlist.append(Blocker(atom=blocker.atom,
7480                                                         root=blocker.root, eapi=blocker.eapi,
7481                                                         satisfied=True))
7482
7483                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7484                 for node in myblocker_uninstalls.root_nodes():
7485                         unsolvable_blockers.add(node)
7486
7487                 for blocker in unsolvable_blockers:
7488                         retlist.append(blocker)
7489
7490                 # If any Uninstall tasks need to be executed in order
7491                 # to avoid a conflict, complete the graph with any
7492                 # dependencies that may have been initially
7493                 # neglected (to ensure that unsafe Uninstall tasks
7494                 # are properly identified and blocked from execution).
7495                 if have_uninstall_task and \
7496                         not complete and \
7497                         not unsolvable_blockers:
7498                         self.myparams.add("complete")
7499                         raise self._serialize_tasks_retry("")
7500
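                     # Unless blocker conflicts are explicitly tolerated, unresolved
                     # blockers or slot collisions abort serialization here; the task
                     # list and graph computed so far are cached for later display.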
7501                 if unsolvable_blockers and \
7502                         not self._accept_blocker_conflicts():
7503                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7504                         self._serialized_tasks_cache = retlist[:]
7505                         self._scheduler_graph = scheduler_graph
7506                         raise self._unknown_internal_error()
7507
7508                 if self._slot_collision_info and \
7509                         not self._accept_blocker_conflicts():
7510                         self._serialized_tasks_cache = retlist[:]
7511                         self._scheduler_graph = scheduler_graph
7512                         raise self._unknown_internal_error()
7513
7514                 return retlist, scheduler_graph
7515
7516         def _show_circular_deps(self, mygraph):
7517                 # No leaf nodes are available, so we have a circular
7518                 # dependency panic situation.  Reduce the noise level to a
7519                 # minimum via repeated elimination of root nodes since they
7520                 # have no parents and thus cannot be part of a cycle.
7521                 while True:
7522                         root_nodes = mygraph.root_nodes(
7523                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7524                         if not root_nodes:
7525                                 break
7526                         mygraph.difference_update(root_nodes)
7527                 # Display the USE flags that are enabled on nodes that are part
7528                 # of dependency cycles in case that helps the user decide to
7529                 # disable some of them.
7530                 display_order = []
7531                 tempgraph = mygraph.copy()
7532                 while not tempgraph.empty():
7533                         nodes = tempgraph.leaf_nodes()
7534                         if not nodes:
7535                                 node = tempgraph.order[0]
7536                         else:
7537                                 node = nodes[0]
7538                         display_order.append(node)
7539                         tempgraph.remove(node)
7540                 display_order.reverse()
7541                 self.myopts.pop("--quiet", None)
7542                 self.myopts.pop("--verbose", None)
7543                 self.myopts["--tree"] = True
7544                 portage.writemsg("\n\n", noiselevel=-1)
7545                 self.display(display_order)
7546                 prefix = colorize("BAD", " * ")
7547                 portage.writemsg("\n", noiselevel=-1)
7548                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7549                         noiselevel=-1)
7550                 portage.writemsg("\n", noiselevel=-1)
7551                 mygraph.debug_print()
7552                 portage.writemsg("\n", noiselevel=-1)
7553                 portage.writemsg(prefix + "Note that circular dependencies " + \
7554                         "can often be avoided by temporarily\n", noiselevel=-1)
7555                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7556                         "optional dependencies.\n", noiselevel=-1)
7557
7558         def _show_merge_list(self):
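                     # Display the cached merge list, but only if it has not already
                     # been shown (in either normal or reversed --tree order).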
7559                 if self._serialized_tasks_cache is not None and \
7560                         not (self._displayed_list and \
7561                         (self._displayed_list == self._serialized_tasks_cache or \
7562                         self._displayed_list == \
7563                                 list(reversed(self._serialized_tasks_cache)))):
7564                         display_list = self._serialized_tasks_cache[:]
7565                         if "--tree" in self.myopts:
7566                                 display_list.reverse()
7567                         self.display(display_list)
7568
7569         def _show_unsatisfied_blockers(self, blockers):
7570                 self._show_merge_list()
7571                 msg = "Error: The above package list contains " + \
7572                         "packages which cannot be installed " + \
7573                         "at the same time on the same system."
7574                 prefix = colorize("BAD", " * ")
7575                 from textwrap import wrap
7576                 portage.writemsg("\n", noiselevel=-1)
7577                 for line in wrap(msg, 70):
7578                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7579
7580                 # Display the conflicting packages along with the packages
7581                 # that pulled them in. This is helpful for troubleshooting
7582                 # cases in which blockers are not resolved automatically and
7583                 # the reasons are not apparent from the normal merge list
7584                 # display.
7585
7586                 conflict_pkgs = {}
7587                 for blocker in blockers:
7588                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7589                                 self._blocker_parents.parent_nodes(blocker)):
7590                                 parent_atoms = self._parent_atoms.get(pkg)
7591                                 if not parent_atoms:
7592                                         atom = self._blocked_world_pkgs.get(pkg)
7593                                         if atom is not None:
7594                                                 parent_atoms = set([("@world", atom)])
7595                                 if parent_atoms:
7596                                         conflict_pkgs[pkg] = parent_atoms
7597
7598                 if conflict_pkgs:
7599                         # Reduce noise by pruning packages that are only
7600                         # pulled in by other conflict packages.
7601                         pruned_pkgs = set()
7602                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7603                                 relevant_parent = False
7604                                 for parent, atom in parent_atoms:
7605                                         if parent not in conflict_pkgs:
7606                                                 relevant_parent = True
7607                                                 break
7608                                 if not relevant_parent:
7609                                         pruned_pkgs.add(pkg)
7610                         for pkg in pruned_pkgs:
7611                                 del conflict_pkgs[pkg]
7612
7613                 if conflict_pkgs:
7614                         msg = []
7615                         msg.append("\n")
7616                         indent = "  "
7617                         # Max number of parents shown, to avoid flooding the display.
7618                         max_parents = 3
7619                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7620
7621                                 pruned_list = set()
7622
7623                                 # Prefer packages that are not directly involved in a conflict.
7624                                 for parent_atom in parent_atoms:
7625                                         if len(pruned_list) >= max_parents:
7626                                                 break
7627                                         parent, atom = parent_atom
7628                                         if parent not in conflict_pkgs:
7629                                                 pruned_list.add(parent_atom)
7630
7631                                 for parent_atom in parent_atoms:
7632                                         if len(pruned_list) >= max_parents:
7633                                                 break
7634                                         pruned_list.add(parent_atom)
7635
7636                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7637                                 msg.append(indent + "%s pulled in by\n" % pkg)
7638
7639                                 for parent_atom in pruned_list:
7640                                         parent, atom = parent_atom
7641                                         msg.append(2*indent)
7642                                         if isinstance(parent,
7643                                                 (PackageArg, AtomArg)):
7644                                                 # For PackageArg and AtomArg types, it's
7645                                                 # redundant to display the atom attribute.
7646                                                 msg.append(str(parent))
7647                                         else:
7648                                                 # Display the specific atom from SetArg or
7649                                                 # Package types.
7650                                                 msg.append("%s required by %s" % (atom, parent))
7651                                         msg.append("\n")
7652
7653                                 if omitted_parents:
7654                                         msg.append(2*indent)
7655                                         msg.append("(and %d more)\n" % omitted_parents)
7656
7657                                 msg.append("\n")
7658
7659                         sys.stderr.write("".join(msg))
7660                         sys.stderr.flush()
7661
7662                 if "--quiet" not in self.myopts:
7663                         show_blocker_docs_link()
7664
7665         def display(self, mylist, favorites=[], verbosity=None):
7666
7667                 # This is used to prevent display_problems() from
7668                 # redundantly displaying this exact same merge list
7669                 # again via _show_merge_list().
7670                 self._displayed_list = mylist
7671
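                     # Verbosity defaults: 1 with --quiet, 3 with --verbose, otherwise 2.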
7672                 if verbosity is None:
7673                         verbosity = ("--quiet" in self.myopts and 1 or \
7674                                 "--verbose" in self.myopts and 3 or 2)
7675                 favorites_set = InternalPackageSet(favorites)
7676                 oneshot = "--oneshot" in self.myopts or \
7677                         "--onlydeps" in self.myopts
7678                 columns = "--columns" in self.myopts
7679                 changelogs=[]
7680                 p=[]
7681                 blockers = []
7682
7683                 counters = PackageCounters()
7684
7685                 if verbosity == 1 and "--verbose" not in self.myopts:
7686                         def create_use_string(*args):
7687                                 return ""
7688                 else:
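                                             # Build the USE="..." portion of the display. Markers assigned
                                             # below: "*" for flags whose state changed relative to the
                                             # installed version, "%" for flags added to or removed from
                                             # IUSE, parentheses for forced/masked flags, and a "-" prefix
                                             # for disabled flags.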
7689                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7690                                 old_iuse, old_use,
7691                                 is_new, reinst_flags,
7692                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7693                                 alphabetical=("--alphabetical" in self.myopts)):
7694                                 enabled = []
7695                                 if alphabetical:
7696                                         disabled = enabled
7697                                         removed = enabled
7698                                 else:
7699                                         disabled = []
7700                                         removed = []
7701                                 cur_iuse = set(cur_iuse)
7702                                 enabled_flags = cur_iuse.intersection(cur_use)
7703                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7704                                 any_iuse = cur_iuse.union(old_iuse)
7705                                 any_iuse = list(any_iuse)
7706                                 any_iuse.sort()
7707                                 for flag in any_iuse:
7708                                         flag_str = None
7709                                         isEnabled = False
7710                                         reinst_flag = reinst_flags and flag in reinst_flags
7711                                         if flag in enabled_flags:
7712                                                 isEnabled = True
7713                                                 if is_new or flag in old_use and \
7714                                                         (all_flags or reinst_flag):
7715                                                         flag_str = red(flag)
7716                                                 elif flag not in old_iuse:
7717                                                         flag_str = yellow(flag) + "%*"
7718                                                 elif flag not in old_use:
7719                                                         flag_str = green(flag) + "*"
7720                                         elif flag in removed_iuse:
7721                                                 if all_flags or reinst_flag:
7722                                                         flag_str = yellow("-" + flag) + "%"
7723                                                         if flag in old_use:
7724                                                                 flag_str += "*"
7725                                                         flag_str = "(" + flag_str + ")"
7726                                                         removed.append(flag_str)
7727                                                 continue
7728                                         else:
7729                                                 if is_new or flag in old_iuse and \
7730                                                         flag not in old_use and \
7731                                                         (all_flags or reinst_flag):
7732                                                         flag_str = blue("-" + flag)
7733                                                 elif flag not in old_iuse:
7734                                                         flag_str = yellow("-" + flag)
7735                                                         if flag not in iuse_forced:
7736                                                                 flag_str += "%"
7737                                                 elif flag in old_use:
7738                                                         flag_str = green("-" + flag) + "*"
7739                                         if flag_str:
7740                                                 if flag in iuse_forced:
7741                                                         flag_str = "(" + flag_str + ")"
7742                                                 if isEnabled:
7743                                                         enabled.append(flag_str)
7744                                                 else:
7745                                                         disabled.append(flag_str)
7746
7747                                 if alphabetical:
7748                                         ret = " ".join(enabled)
7749                                 else:
7750                                         ret = " ".join(enabled + disabled + removed)
7751                                 if ret:
7752                                         ret = '%s="%s" ' % (name, ret)
7753                                 return ret
7754
7755                 repo_display = RepoDisplay(self.roots)
7756
7757                 tree_nodes = []
7758                 display_list = []
7759                 mygraph = self.digraph.copy()
7760
7761                 # If there are any Uninstall instances, add the corresponding
7762                 # blockers to the digraph (useful for --tree display).
7763
7764                 executed_uninstalls = set(node for node in mylist \
7765                         if isinstance(node, Package) and node.operation == "unmerge")
7766
7767                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7768                         uninstall_parents = \
7769                                 self._blocker_uninstalls.parent_nodes(uninstall)
7770                         if not uninstall_parents:
7771                                 continue
7772
7773                         # Remove the corresponding "nomerge" node and substitute
7774                         # the Uninstall node.
7775                         inst_pkg = self._pkg_cache[
7776                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7777                         try:
7778                                 mygraph.remove(inst_pkg)
7779                         except KeyError:
7780                                 pass
7781
7782                         try:
7783                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7784                         except KeyError:
7785                                 inst_pkg_blockers = []
7786
7787                         # Break the Package -> Uninstall edges.
7788                         mygraph.remove(uninstall)
7789
7790                         # Resolution of a package's blockers
7791                         # depends on its own uninstallation.
7792                         for blocker in inst_pkg_blockers:
7793                                 mygraph.add(uninstall, blocker)
7794
7795                         # Expand Package -> Uninstall edges into
7796                         # Package -> Blocker -> Uninstall edges.
7797                         for blocker in uninstall_parents:
7798                                 mygraph.add(uninstall, blocker)
7799                                 for parent in self._blocker_parents.parent_nodes(blocker):
7800                                         if parent != inst_pkg:
7801                                                 mygraph.add(blocker, parent)
7802
7803                         # If the uninstall task did not need to be executed because
7804                         # of an upgrade, display Blocker -> Upgrade edges since the
7805                         # corresponding Blocker -> Uninstall edges will not be shown.
7806                         upgrade_node = \
7807                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7808                         if upgrade_node is not None and \
7809                                 uninstall not in executed_uninstalls:
7810                                 for blocker in uninstall_parents:
7811                                         mygraph.add(upgrade_node, blocker)
7812
7813                 unsatisfied_blockers = []
7814                 i = 0
7815                 depth = 0
7816                 shown_edges = set()
7817                 for x in mylist:
7818                         if isinstance(x, Blocker) and not x.satisfied:
7819                                 unsatisfied_blockers.append(x)
7820                                 continue
7821                         graph_key = x
7822                         if "--tree" in self.myopts:
7823                                 depth = len(tree_nodes)
7824                                 while depth and graph_key not in \
7825                                         mygraph.child_nodes(tree_nodes[depth-1]):
7826                                                 depth -= 1
7827                                 if depth:
7828                                         tree_nodes = tree_nodes[:depth]
7829                                         tree_nodes.append(graph_key)
7830                                         display_list.append((x, depth, True))
7831                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7832                                 else:
7833                                         traversed_nodes = set() # prevent endless cycles
7834                                         traversed_nodes.add(graph_key)
7835                                         def add_parents(current_node, ordered):
7836                                                 parent_nodes = None
7837                                                 # Do not traverse to parents if this node is
7838                                                 # an argument or a direct member of a set that has
7839                                                 # been specified as an argument (system or world).
7840                                                 if current_node not in self._set_nodes:
7841                                                         parent_nodes = mygraph.parent_nodes(current_node)
7842                                                 if parent_nodes:
7843                                                         child_nodes = set(mygraph.child_nodes(current_node))
7844                                                         selected_parent = None
7845                                                         # First, try to avoid a direct cycle.
7846                                                         for node in parent_nodes:
7847                                                                 if not isinstance(node, (Blocker, Package)):
7848                                                                         continue
7849                                                                 if node not in traversed_nodes and \
7850                                                                         node not in child_nodes:
7851                                                                         edge = (current_node, node)
7852                                                                         if edge in shown_edges:
7853                                                                                 continue
7854                                                                         selected_parent = node
7855                                                                         break
7856                                                         if not selected_parent:
7857                                                                 # A direct cycle is unavoidable.
7858                                                                 for node in parent_nodes:
7859                                                                         if not isinstance(node, (Blocker, Package)):
7860                                                                                 continue
7861                                                                         if node not in traversed_nodes:
7862                                                                                 edge = (current_node, node)
7863                                                                                 if edge in shown_edges:
7864                                                                                         continue
7865                                                                                 selected_parent = node
7866                                                                                 break
7867                                                         if selected_parent:
7868                                                                 shown_edges.add((current_node, selected_parent))
7869                                                                 traversed_nodes.add(selected_parent)
7870                                                                 add_parents(selected_parent, False)
7871                                                 display_list.append((current_node,
7872                                                         len(tree_nodes), ordered))
7873                                                 tree_nodes.append(current_node)
7874                                         tree_nodes = []
7875                                         add_parents(graph_key, True)
7876                         else:
7877                                 display_list.append((x, depth, True))
7878                 mylist = display_list
7879                 for x in unsatisfied_blockers:
7880                         mylist.append((x, 0, True))
7881
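                     # Walk the list backwards and prune context-only entries
                     # (unordered duplicates and "nomerge" nodes) that have no
                     # merge displayed beneath them.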
7882                 last_merge_depth = 0
7883                 for i in xrange(len(mylist)-1,-1,-1):
7884                         graph_key, depth, ordered = mylist[i]
7885                         if not ordered and depth == 0 and i > 0 \
7886                                 and graph_key == mylist[i-1][0] and \
7887                                 mylist[i-1][1] == 0:
7888                                 # An ordered node got a consecutive duplicate when the tree was
7889                                 # being filled in.
7890                                 del mylist[i]
7891                                 continue
7892                         if ordered and graph_key[-1] != "nomerge":
7893                                 last_merge_depth = depth
7894                                 continue
7895                         if depth >= last_merge_depth or \
7896                                 i < len(mylist) - 1 and \
7897                                 depth >= mylist[i+1][1]:
7898                                         del mylist[i]
7899
7900                 from portage import flatten
7901                 from portage.dep import use_reduce, paren_reduce
7902                 # List of files to fetch, used to avoid counting the same file
7903                 # twice in the size display (verbose mode).
7904                 myfetchlist=[]
7905
7906                 # Use this set to detect when all the "repoadd" strings are "[0]"
7907                 # and disable the entire repo display in this case.
7908                 repoadd_set = set()
7909
7910                 for mylist_index in xrange(len(mylist)):
7911                         x, depth, ordered = mylist[mylist_index]
7912                         pkg_type = x[0]
7913                         myroot = x[1]
7914                         pkg_key = x[2]
7915                         portdb = self.trees[myroot]["porttree"].dbapi
7916                         bindb  = self.trees[myroot]["bintree"].dbapi
7917                         vardb = self.trees[myroot]["vartree"].dbapi
7918                         vartree = self.trees[myroot]["vartree"]
7919                         pkgsettings = self.pkgsettings[myroot]
7920
7921                         fetch=" "
7922                         indent = " " * depth
7923
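                             # Blockers are displayed as "b" when already satisfied and
                             # as "B" when they remain unresolved.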
7924                         if isinstance(x, Blocker):
7925                                 if x.satisfied:
7926                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7927                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7928                                 else:
7929                                         blocker_style = "PKG_BLOCKER"
7930                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7931                                 if ordered:
7932                                         counters.blocks += 1
7933                                         if x.satisfied:
7934                                                 counters.blocks_satisfied += 1
7935                                 resolved = portage.key_expand(
7936                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7937                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7938                                         addl += " " + colorize(blocker_style, resolved)
7939                                 else:
7940                                         addl = "[%s %s] %s%s" % \
7941                                                 (colorize(blocker_style, "blocks"),
7942                                                 addl, indent, colorize(blocker_style, resolved))
7943                                 block_parents = self._blocker_parents.parent_nodes(x)
7944                                 block_parents = set([pnode[2] for pnode in block_parents])
7945                                 block_parents = ", ".join(block_parents)
7946                                 if resolved!=x[2]:
7947                                         addl += colorize(blocker_style,
7948                                                 " (\"%s\" is blocking %s)") % \
7949                                                 (str(x.atom).lstrip("!"), block_parents)
7950                                 else:
7951                                         addl += colorize(blocker_style,
7952                                                 " (is blocking %s)") % block_parents
7953                                 if isinstance(x, Blocker) and x.satisfied:
7954                                         if columns:
7955                                                 continue
7956                                         p.append(addl)
7957                                 else:
7958                                         blockers.append(addl)
7959                         else:
7960                                 pkg_status = x[3]
7961                                 pkg_merge = ordered and pkg_status == "merge"
7962                                 if not pkg_merge and pkg_status == "merge":
7963                                         pkg_status = "nomerge"
7964                                 built = pkg_type != "ebuild"
7965                                 installed = pkg_type == "installed"
7966                                 pkg = x
7967                                 metadata = pkg.metadata
7968                                 ebuild_path = None
7969                                 repo_name = metadata["repository"]
7970                                 if pkg_type == "ebuild":
7971                                         ebuild_path = portdb.findname(pkg_key)
7972                                         if not ebuild_path: # shouldn't happen
7973                                                 raise portage.exception.PackageNotFound(pkg_key)
7974                                         repo_path_real = os.path.dirname(os.path.dirname(
7975                                                 os.path.dirname(ebuild_path)))
7976                                 else:
7977                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7978                                 pkg_use = list(pkg.use.enabled)
7979                                 try:
7980                                         restrict = flatten(use_reduce(paren_reduce(
7981                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7982                                 except portage.exception.InvalidDependString, e:
7983                                         if not pkg.installed:
7984                                                 show_invalid_depstring_notice(x,
7985                                                         pkg.metadata["RESTRICT"], str(e))
7986                                                 del e
7987                                                 return 1
7988                                         restrict = []
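                                     # Fetch-restricted ebuilds are flagged "F", downgraded to "f"
                                     # when the required distfiles are already present.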
7989                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7990                                         "fetch" in restrict:
7991                                         fetch = red("F")
7992                                         if ordered:
7993                                                 counters.restrict_fetch += 1
7994                                         if portdb.fetch_check(pkg_key, pkg_use):
7995                                                 fetch = green("f")
7996                                                 if ordered:
7997                                                         counters.restrict_fetch_satisfied += 1
7998
7999                                 # We need to test for "--emptytree" here rather than the "empty" param because the "empty"
8000                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
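                                     # Status letters assigned below: "R" reinstall of an installed
                                     # version, "U" upgrade (or "UD" downgrade) within an existing
                                     # slot, "NS" first install into a new slot, "N" brand new package.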
8001                                 myoldbest = []
8002                                 myinslotlist = None
8003                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8004                                 if vardb.cpv_exists(pkg_key):
8005                                         addl="  "+yellow("R")+fetch+"  "
8006                                         if ordered:
8007                                                 if pkg_merge:
8008                                                         counters.reinst += 1
8009                                                 elif pkg_status == "uninstall":
8010                                                         counters.uninst += 1
8011                                 # filter out old-style virtual matches
8012                                 elif installed_versions and \
8013                                         portage.cpv_getkey(installed_versions[0]) == \
8014                                         portage.cpv_getkey(pkg_key):
8015                                         myinslotlist = vardb.match(pkg.slot_atom)
8016                                         # If this is the first install of a new-style virtual, we
8017                                         # need to filter out old-style virtual matches.
8018                                         if myinslotlist and \
8019                                                 portage.cpv_getkey(myinslotlist[0]) != \
8020                                                 portage.cpv_getkey(pkg_key):
8021                                                 myinslotlist = None
8022                                         if myinslotlist:
8023                                                 myoldbest = myinslotlist[:]
8024                                                 addl = "   " + fetch
8025                                                 if not portage.dep.cpvequal(pkg_key,
8026                                                         portage.best([pkg_key] + myoldbest)):
8027                                                         # Downgrade in slot
8028                                                         addl += turquoise("U")+blue("D")
8029                                                         if ordered:
8030                                                                 counters.downgrades += 1
8031                                                 else:
8032                                                         # Update in slot
8033                                                         addl += turquoise("U") + " "
8034                                                         if ordered:
8035                                                                 counters.upgrades += 1
8036                                         else:
8037                                                 # New slot, mark it new.
8038                                                 addl = " " + green("NS") + fetch + "  "
8039                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8040                                                 if ordered:
8041                                                         counters.newslot += 1
8042
8043                                         if "--changelog" in self.myopts:
8044                                                 inst_matches = vardb.match(pkg.slot_atom)
8045                                                 if inst_matches:
8046                                                         changelogs.extend(self.calc_changelog(
8047                                                                 portdb.findname(pkg_key),
8048                                                                 inst_matches[0], pkg_key))
8049                                 else:
8050                                         addl = " " + green("N") + " " + fetch + "  "
8051                                         if ordered:
8052                                                 counters.new += 1
8053
8054                                 verboseadd = ""
8055                                 repoadd = None
8056
8057                                 if True:
8058                                         # USE flag display
8059                                         forced_flags = set()
8060                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8061                                         forced_flags.update(pkgsettings.useforce)
8062                                         forced_flags.update(pkgsettings.usemask)
8063
8064                                         cur_use = [flag for flag in pkg.use.enabled \
8065                                                 if flag in pkg.iuse.all]
8066                                         cur_iuse = sorted(pkg.iuse.all)
8067
8068                                         if myoldbest and myinslotlist:
8069                                                 previous_cpv = myoldbest[0]
8070                                         else:
8071                                                 previous_cpv = pkg.cpv
8072                                         if vardb.cpv_exists(previous_cpv):
8073                                                 old_iuse, old_use = vardb.aux_get(
8074                                                                 previous_cpv, ["IUSE", "USE"])
8075                                                 old_iuse = list(set(
8076                                                         filter_iuse_defaults(old_iuse.split())))
8077                                                 old_iuse.sort()
8078                                                 old_use = old_use.split()
8079                                                 is_new = False
8080                                         else:
8081                                                 old_iuse = []
8082                                                 old_use = []
8083                                                 is_new = True
8084
8085                                         old_use = [flag for flag in old_use if flag in old_iuse]
8086
8087                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8088                                         use_expand.sort()
8089                                         use_expand.reverse()
8090                                         use_expand_hidden = \
8091                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8092
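                                             # Split a flat flag list into USE_EXPAND buckets keyed by the
                                             # lower-cased variable name, with unprefixed flags left under
                                             # "USE". Illustrative example: with "linguas" in USE_EXPAND, a
                                             # flag "linguas_de" is recorded as "de" under the "linguas" key.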
8093                                         def map_to_use_expand(myvals, forcedFlags=False,
8094                                                 removeHidden=True):
8095                                                 ret = {}
8096                                                 forced = {}
8097                                                 for exp in use_expand:
8098                                                         ret[exp] = []
8099                                                         forced[exp] = set()
8100                                                         for val in myvals[:]:
8101                                                                 if val.startswith(exp.lower()+"_"):
8102                                                                         if val in forced_flags:
8103                                                                                 forced[exp].add(val[len(exp)+1:])
8104                                                                         ret[exp].append(val[len(exp)+1:])
8105                                                                         myvals.remove(val)
8106                                                 ret["USE"] = myvals
8107                                                 forced["USE"] = [val for val in myvals \
8108                                                         if val in forced_flags]
8109                                                 if removeHidden:
8110                                                         for exp in use_expand_hidden:
8111                                                                 ret.pop(exp, None)
8112                                                 if forcedFlags:
8113                                                         return ret, forced
8114                                                 return ret
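                                         # For illustration, given use_expand = ["video_cards"]
                                         # and myvals = ["alsa", "video_cards_radeon"], this
                                         # helper returns {"video_cards": ["radeon"],
                                         # "USE": ["alsa"]}; with forcedFlags=True it also
                                         # returns a parallel dict of the flags found in
                                         # forced_flags.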
8115
8116                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8117                                         # are the only thing that triggered reinstallation.
8118                                         reinst_flags_map = {}
8119                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8120                                         reinst_expand_map = None
8121                                         if reinstall_for_flags:
8122                                                 reinst_flags_map = map_to_use_expand(
8123                                                         list(reinstall_for_flags), removeHidden=False)
8124                                                 for k in list(reinst_flags_map):
8125                                                         if not reinst_flags_map[k]:
8126                                                                 del reinst_flags_map[k]
8127                                                 if not reinst_flags_map.get("USE"):
8128                                                         reinst_expand_map = reinst_flags_map.copy()
8129                                                         reinst_expand_map.pop("USE", None)
8130                                         if reinst_expand_map and \
8131                                                 not set(reinst_expand_map).difference(
8132                                                 use_expand_hidden):
8133                                                 use_expand_hidden = \
8134                                                         set(use_expand_hidden).difference(
8135                                                         reinst_expand_map)
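                                         # For example, if the reinstall was triggered only by
                                         # "linguas_de" and "linguas" appears in the lower-cased
                                         # USE_EXPAND_HIDDEN list, then "linguas" is removed from
                                         # the hidden set above so the responsible flag group is
                                         # still displayed.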
8136
8137                                         cur_iuse_map, iuse_forced = \
8138                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8139                                         cur_use_map = map_to_use_expand(cur_use)
8140                                         old_iuse_map = map_to_use_expand(old_iuse)
8141                                         old_use_map = map_to_use_expand(old_use)
8142
8143                                         use_expand.sort()
8144                                         use_expand.insert(0, "USE")
8145
8146                                         for key in use_expand:
8147                                                 if key in use_expand_hidden:
8148                                                         continue
8149                                                 verboseadd += create_use_string(key.upper(),
8150                                                         cur_iuse_map[key], iuse_forced[key],
8151                                                         cur_use_map[key], old_iuse_map[key],
8152                                                         old_use_map[key], is_new,
8153                                                         reinst_flags_map.get(key))
8154
8155                                 if verbosity == 3:
8156                                         # size verbose
8157                                         mysize=0
8158                                         if pkg_type == "ebuild" and pkg_merge:
8159                                                 try:
8160                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8161                                                                 useflags=pkg_use, debug=self.edebug)
8162                                                 except portage.exception.InvalidDependString, e:
8163                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8164                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8165                                                         del e
8166                                                         return 1
8167                                                 if myfilesdict is None:
8168                                                         myfilesdict="[empty/missing/bad digest]"
8169                                                 else:
8170                                                         for myfetchfile in myfilesdict:
8171                                                                 if myfetchfile not in myfetchlist:
8172                                                                         mysize+=myfilesdict[myfetchfile]
8173                                                                         myfetchlist.append(myfetchfile)
8174                                                         if ordered:
8175                                                                 counters.totalsize += mysize
8176                                                 verboseadd += format_size(mysize)
8177
8178                                         # overlay verbose
8179                                         # find a previously installed version in the same slot and note its repository
8180                                         has_previous = False
8181                                         repo_name_prev = None
8182                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8183                                                 metadata["SLOT"])
8184                                         slot_matches = vardb.match(slot_atom)
8185                                         if slot_matches:
8186                                                 has_previous = True
8187                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8188                                                         ["repository"])[0]
8189
8190                                         # now use the data to generate output
8191                                         if pkg.installed or not has_previous:
8192                                                 repoadd = repo_display.repoStr(repo_path_real)
8193                                         else:
8194                                                 repo_path_prev = None
8195                                                 if repo_name_prev:
8196                                                         repo_path_prev = portdb.getRepositoryPath(
8197                                                                 repo_name_prev)
8198                                                 if repo_path_prev == repo_path_real:
8199                                                         repoadd = repo_display.repoStr(repo_path_real)
8200                                                 else:
8201                                                         repoadd = "%s=>%s" % (
8202                                                                 repo_display.repoStr(repo_path_prev),
8203                                                                 repo_display.repoStr(repo_path_real))
8204                                         if repoadd:
8205                                                 repoadd_set.add(repoadd)
8206
8207                                 xs = [portage.cpv_getkey(pkg_key)] + \
8208                                         list(portage.catpkgsplit(pkg_key)[2:])
8209                                 if xs[2] == "r0":
8210                                         xs[2] = ""
8211                                 else:
8212                                         xs[2] = "-" + xs[2]
8213
8214                                 mywidth = 130
8215                                 if "COLUMNWIDTH" in self.settings:
8216                                         try:
8217                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8218                                         except ValueError, e:
8219                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8220                                                 portage.writemsg(
8221                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8222                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8223                                                 del e
8224                                 oldlp = mywidth - 30
8225                                 newlp = oldlp - 30
8226
8227                                 # Convert myoldbest from a list to a string.
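                                 # For example, portage.catpkgsplit("sys-apps/portage-2.1.6-r1")
                                 # returns ("sys-apps", "portage", "2.1.6", "r1"), so the entry
                                 # becomes "2.1.6-r1"; a trailing "-r0" is stripped so that only
                                 # the version (e.g. "2.1.6") is shown.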
8228                                 if not myoldbest:
8229                                         myoldbest = ""
8230                                 else:
8231                                         for pos, key in enumerate(myoldbest):
8232                                                 key = portage.catpkgsplit(key)[2] + \
8233                                                         "-" + portage.catpkgsplit(key)[3]
8234                                                 if key[-3:] == "-r0":
8235                                                         key = key[:-3]
8236                                                 myoldbest[pos] = key
8237                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8238
8239                                 pkg_cp = xs[0]
8240                                 root_config = self.roots[myroot]
8241                                 system_set = root_config.sets["system"]
8242                                 world_set  = root_config.sets["world"]
8243
8244                                 pkg_system = False
8245                                 pkg_world = False
8246                                 try:
8247                                         pkg_system = system_set.findAtomForPackage(pkg)
8248                                         pkg_world  = world_set.findAtomForPackage(pkg)
8249                                         if not (oneshot or pkg_world) and \
8250                                                 myroot == self.target_root and \
8251                                                 favorites_set.findAtomForPackage(pkg):
8252                                                 # Maybe it will be added to world now.
8253                                                 if create_world_atom(pkg, favorites_set, root_config):
8254                                                         pkg_world = True
8255                                 except portage.exception.InvalidDependString:
8256                                         # This is reported elsewhere if relevant.
8257                                         pass
8258
8259                                 def pkgprint(pkg_str):
8260                                         if pkg_merge:
8261                                                 if pkg_system:
8262                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8263                                                 elif pkg_world:
8264                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8265                                                 else:
8266                                                         return colorize("PKG_MERGE", pkg_str)
8267                                         elif pkg_status == "uninstall":
8268                                                 return colorize("PKG_UNINSTALL", pkg_str)
8269                                         else:
8270                                                 if pkg_system:
8271                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8272                                                 elif pkg_world:
8273                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8274                                                 else:
8275                                                         return colorize("PKG_NOMERGE", pkg_str)
8276
8277                                 try:
8278                                         properties = flatten(use_reduce(paren_reduce(
8279                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8280                                 except portage.exception.InvalidDependString, e:
8281                                         if not pkg.installed:
8282                                                 show_invalid_depstring_notice(pkg,
8283                                                         pkg.metadata["PROPERTIES"], str(e))
8284                                                 del e
8285                                                 return 1
8286                                         properties = []
8287                                 interactive = "interactive" in properties
8288                                 if interactive and pkg.operation == "merge":
8289                                         addl = colorize("WARN", "I") + addl[1:]
8290                                         if ordered:
8291                                                 counters.interactive += 1
8292
8293                                 if x[1]!="/":
8294                                         if myoldbest:
8295                                                 myoldbest +=" "
8296                                         if "--columns" in self.myopts:
8297                                                 if "--quiet" in self.myopts:
8298                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8299                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8300                                                         myprint=myprint+myoldbest
8301                                                         myprint=myprint+darkgreen("to "+x[1])
8302                                                         verboseadd = None
8303                                                 else:
8304                                                         if not pkg_merge:
8305                                                                 myprint = "[%s] %s%s" % \
8306                                                                         (pkgprint(pkg_status.ljust(13)),
8307                                                                         indent, pkgprint(pkg.cp))
8308                                                         else:
8309                                                                 myprint = "[%s %s] %s%s" % \
8310                                                                         (pkgprint(pkg.type_name), addl,
8311                                                                         indent, pkgprint(pkg.cp))
8312                                                         if (newlp-nc_len(myprint)) > 0:
8313                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8314                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8315                                                         if (oldlp-nc_len(myprint)) > 0:
8316                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8317                                                         myprint=myprint+myoldbest
8318                                                         myprint += darkgreen("to " + pkg.root)
8319                                         else:
8320                                                 if not pkg_merge:
8321                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8322                                                 else:
8323                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8324                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8325                                                         myoldbest + darkgreen("to " + myroot)
8326                                 else:
8327                                         if "--columns" in self.myopts:
8328                                                 if "--quiet" in self.myopts:
8329                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8330                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8331                                                         myprint=myprint+myoldbest
8332                                                         verboseadd = None
8333                                                 else:
8334                                                         if not pkg_merge:
8335                                                                 myprint = "[%s] %s%s" % \
8336                                                                         (pkgprint(pkg_status.ljust(13)),
8337                                                                         indent, pkgprint(pkg.cp))
8338                                                         else:
8339                                                                 myprint = "[%s %s] %s%s" % \
8340                                                                         (pkgprint(pkg.type_name), addl,
8341                                                                         indent, pkgprint(pkg.cp))
8342                                                         if (newlp-nc_len(myprint)) > 0:
8343                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8344                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8345                                                         if (oldlp-nc_len(myprint)) > 0:
8346                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8347                                                         myprint += myoldbest
8348                                         else:
8349                                                 if not pkg_merge:
8350                                                         myprint = "[%s] %s%s %s" % \
8351                                                                 (pkgprint(pkg_status.ljust(13)),
8352                                                                 indent, pkgprint(pkg.cpv),
8353                                                                 myoldbest)
8354                                                 else:
8355                                                         myprint = "[%s %s] %s%s %s" % \
8356                                                                 (pkgprint(pkg_type), addl, indent,
8357                                                                 pkgprint(pkg.cpv), myoldbest)
8358
8359                                 if columns and pkg.operation == "uninstall":
8360                                         continue
8361                                 p.append((myprint, verboseadd, repoadd))
8362
8363                                 if "--tree" not in self.myopts and \
8364                                         "--quiet" not in self.myopts and \
8365                                         not self._opts_no_restart.intersection(self.myopts) and \
8366                                         pkg.root == self._running_root.root and \
8367                                         portage.match_from_list(
8368                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8369                                         not vardb.cpv_exists(pkg.cpv):
8371                                                 if mylist_index < len(mylist) - 1:
8372                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8373                                                         p.append(colorize("WARN", "    then resume the merge."))
8374
8375                 out = sys.stdout
8376                 show_repos = repoadd_set and repoadd_set != set(["0"])
8377
8378                 for x in p:
8379                         if isinstance(x, basestring):
8380                                 out.write("%s\n" % (x,))
8381                                 continue
8382
8383                         myprint, verboseadd, repoadd = x
8384
8385                         if verboseadd:
8386                                 myprint += " " + verboseadd
8387
8388                         if show_repos and repoadd:
8389                                 myprint += " " + teal("[%s]" % repoadd)
8390
8391                         out.write("%s\n" % (myprint,))
8392
8393                 for x in blockers:
8394                         print x
8395
8396                 if verbosity == 3:
8397                         print
8398                         print counters
8399                         if show_repos:
8400                                 sys.stdout.write(str(repo_display))
8401
8402                 if "--changelog" in self.myopts:
8403                         print
8404                         for revision,text in changelogs:
8405                                 print bold('*'+revision)
8406                                 sys.stdout.write(text)
8407
8408                 sys.stdout.flush()
8409                 return os.EX_OK
8410
8411         def display_problems(self):
8412                 """
8413                 Display problems with the dependency graph such as slot collisions.
8414                 This is called internally by display() to show the problems _after_
8415                 the merge list where it is most likely to be seen, but if display()
8416                 is not going to be called then this method should be called explicitly
8417                 to ensure that the user is notified of problems with the graph.
8418
8419                 All output goes to stderr, except for unsatisfied dependencies which
8420                 go to stdout for parsing by programs such as autounmask.
8421                 """
8422
8423                                 # Note that show_masked_packages() sends its output to
8424                 # stdout, and some programs such as autounmask parse the
8425                 # output in cases when emerge bails out. However, when
8426                 # show_masked_packages() is called for installed packages
8427                 # here, the message is a warning that is more appropriate
8428                 # to send to stderr, so temporarily redirect stdout to
8429                 # stderr. TODO: Fix output code so there's a cleaner way
8430                 # to redirect everything to stderr.
8431                 sys.stdout.flush()
8432                 sys.stderr.flush()
8433                 stdout = sys.stdout
8434                 try:
8435                         sys.stdout = sys.stderr
8436                         self._display_problems()
8437                 finally:
8438                         sys.stdout = stdout
8439                         sys.stdout.flush()
8440                         sys.stderr.flush()
8441
8442                 # This goes to stdout for parsing by programs like autounmask.
8443                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8444                         self._show_unsatisfied_dep(*pargs, **kwargs)
8445
8446         def _display_problems(self):
8447                 if self._circular_deps_for_display is not None:
8448                         self._show_circular_deps(
8449                                 self._circular_deps_for_display)
8450
8451                 # The user is only notified of a slot conflict if
8452                 # there are no unresolvable blocker conflicts.
8453                 if self._unsatisfied_blockers_for_display is not None:
8454                         self._show_unsatisfied_blockers(
8455                                 self._unsatisfied_blockers_for_display)
8456                 else:
8457                         self._show_slot_collision_notice()
8458
8459                 # TODO: Add generic support for "set problem" handlers so that
8460                 # the below warnings aren't special cases for world only.
8461
8462                 if self._missing_args:
8463                         world_problems = False
8464                         if "world" in self._sets:
8465                                 # Filter out indirect members of world (from nested sets)
8466                                 # since only direct members of world are desired here.
8467                                 world_set = self.roots[self.target_root].sets["world"]
8468                                 for arg, atom in self._missing_args:
8469                                         if arg.name == "world" and atom in world_set:
8470                                                 world_problems = True
8471                                                 break
8472
8473                         if world_problems:
8474                                 sys.stderr.write("\n!!! Problems have been " + \
8475                                         "detected with your world file\n")
8476                                 sys.stderr.write("!!! Please run " + \
8477                                         green("emaint --check world")+"\n\n")
8478
8479                 if self._missing_args:
8480                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8481                                 " Ebuilds for the following packages are either all\n")
8482                         sys.stderr.write(colorize("BAD", "!!!") + \
8483                                 " masked or don't exist:\n")
8484                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8485                                 self._missing_args) + "\n")
8486
8487                 if self._pprovided_args:
8488                         arg_refs = {}
8489                         for arg, atom in self._pprovided_args:
8490                                 if isinstance(arg, SetArg):
8491                                         parent = arg.name
8492                                         arg_atom = (atom, atom)
8493                                 else:
8494                                         parent = "args"
8495                                         arg_atom = (arg.arg, atom)
8496                                 refs = arg_refs.setdefault(arg_atom, [])
8497                                 if parent not in refs:
8498                                         refs.append(parent)
8499                         msg = []
8500                         msg.append(bad("\nWARNING: "))
8501                         if len(self._pprovided_args) > 1:
8502                                 msg.append("Requested packages will not be " + \
8503                                         "merged because they are listed in\n")
8504                         else:
8505                                 msg.append("A requested package will not be " + \
8506                                         "merged because it is listed in\n")
8507                         msg.append("package.provided:\n\n")
8508                         problems_sets = set()
8509                         for (arg, atom), refs in arg_refs.iteritems():
8510                                 ref_string = ""
8511                                 if refs:
8512                                         problems_sets.update(refs)
8513                                         refs.sort()
8514                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8515                                         ref_string = " pulled in by " + ref_string
8516                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8517                         msg.append("\n")
8518                         if "world" in problems_sets:
8519                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8520                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8521                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8522                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8523                                 msg.append("The best course of action depends on the reason that an offending\n")
8524                                 msg.append("package.provided entry exists.\n\n")
8525                         sys.stderr.write("".join(msg))
8526
8527                 masked_packages = []
8528                 for pkg in self._masked_installed:
8529                         root_config = pkg.root_config
8530                         pkgsettings = self.pkgsettings[pkg.root]
8531                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8532                         masked_packages.append((root_config, pkgsettings,
8533                                 pkg.cpv, pkg.metadata, mreasons))
8534                 if masked_packages:
8535                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8536                                 " The following installed packages are masked:\n")
8537                         show_masked_packages(masked_packages)
8538                         show_mask_docs()
8539                         print
8540
8541         def calc_changelog(self,ebuildpath,current,next):
8542                 if ebuildpath is None or not os.path.exists(ebuildpath):
8543                         return []
8544                 current = '-'.join(portage.catpkgsplit(current)[1:])
8545                 if current.endswith('-r0'):
8546                         current = current[:-3]
8547                 next = '-'.join(portage.catpkgsplit(next)[1:])
8548                 if next.endswith('-r0'):
8549                         next = next[:-3]
8550                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8551                 try:
8552                         changelog = open(changelogpath).read()
8553                 except SystemExit, e:
8554                         raise # Needed, else the bare except below would swallow SystemExit
8555                 except:
8556                         return []
8557                 divisions = self.find_changelog_tags(changelog)
8558                 #print 'XX from',current,'to',next
8559                 #for div,text in divisions: print 'XX',div
8560                 # skip entries for all revisions above the one we are about to emerge
8561                 for i in range(len(divisions)):
8562                         if divisions[i][0]==next:
8563                                 divisions = divisions[i:]
8564                                 break
8565                 # find out how many entries we are going to display
8566                 for i in range(len(divisions)):
8567                         if divisions[i][0]==current:
8568                                 divisions = divisions[:i]
8569                                 break
8570                 else:
8571                         # couldn't find the current revision in the list; display nothing
8572                         return []
8573                 return divisions
8574
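        # For illustration (hypothetical ChangeLog content): entry headers of the
        # form "*portage-2.1.6-r1 (01 Jan 2009)" cause find_changelog_tags() to
        # return (release, entry_text) pairs such as ("portage-2.1.6-r1", "..."),
        # with any ".ebuild" or "-r0" suffix stripped from the release name;
        # calc_changelog() above then keeps only the entries from next down to
        # (but not including) current.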
8575         def find_changelog_tags(self,changelog):
8576                 divs = []
8577                 release = None
8578                 while 1:
8579                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8580                         if match is None:
8581                                 if release is not None:
8582                                         divs.append((release,changelog))
8583                                 return divs
8584                         if release is not None:
8585                                 divs.append((release,changelog[:match.start()]))
8586                         changelog = changelog[match.end():]
8587                         release = match.group(1)
8588                         if release.endswith('.ebuild'):
8589                                 release = release[:-7]
8590                         if release.endswith('-r0'):
8591                                 release = release[:-3]
8592
8593         def saveNomergeFavorites(self):
8594                 """Find atoms in favorites that are not in the mergelist and add them
8595                 to the world file if necessary."""
8596                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8597                         "--oneshot", "--onlydeps", "--pretend"):
8598                         if x in self.myopts:
8599                                 return
8600                 root_config = self.roots[self.target_root]
8601                 world_set = root_config.sets["world"]
8602
8603                 world_locked = False
8604                 if hasattr(world_set, "lock"):
8605                         world_set.lock()
8606                         world_locked = True
8607
8608                 if hasattr(world_set, "load"):
8609                         world_set.load() # maybe it's changed on disk
8610
8611                 args_set = self._sets["args"]
8612                 portdb = self.trees[self.target_root]["porttree"].dbapi
8613                 added_favorites = set()
8614                 for x in self._set_nodes:
8615                         pkg_type, root, pkg_key, pkg_status = x
8616                         if pkg_status != "nomerge":
8617                                 continue
8618
8619                         try:
8620                                 myfavkey = create_world_atom(x, args_set, root_config)
8621                                 if myfavkey:
8622                                         if myfavkey in added_favorites:
8623                                                 continue
8624                                         added_favorites.add(myfavkey)
8625                         except portage.exception.InvalidDependString, e:
8626                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8627                                         (pkg_key, str(e)), noiselevel=-1)
8628                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8629                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8630                                 del e
8631                 all_added = []
8632                 for k in self._sets:
8633                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8634                                 continue
8635                         s = SETPREFIX + k
8636                         if s in world_set:
8637                                 continue
8638                         all_added.append(SETPREFIX + k)
8639                 all_added.extend(added_favorites)
8640                 all_added.sort()
8641                 for a in all_added:
8642                         print ">>> Recording %s in \"world\" favorites file..." % \
8643                                 colorize("INFORM", str(a))
8644                 if all_added:
8645                         world_set.update(all_added)
8646
8647                 if world_locked:
8648                         world_set.unlock()
8649
8650         def loadResumeCommand(self, resume_data, skip_masked=False):
8651                 """
8652                 Add a resume command to the graph and validate it in the process.  This
8653                 will raise a PackageNotFound exception if a package is not available.
8654                 """
8655
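                # resume_data is expected to be shaped roughly like (hypothetical
                # values):
                #   {"mergelist": [["ebuild", "/", "sys-apps/portage-2.1.6", "merge"]],
                #    "favorites": ["sys-apps/portage"]}
                # where each mergelist entry is [pkg_type, root, cpv, action].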
8656                 if not isinstance(resume_data, dict):
8657                         return False
8658
8659                 mergelist = resume_data.get("mergelist")
8660                 if not isinstance(mergelist, list):
8661                         mergelist = []
8662
8663                 fakedb = self.mydbapi
8664                 trees = self.trees
8665                 serialized_tasks = []
8666                 masked_tasks = []
8667                 for x in mergelist:
8668                         if not (isinstance(x, list) and len(x) == 4):
8669                                 continue
8670                         pkg_type, myroot, pkg_key, action = x
8671                         if pkg_type not in self.pkg_tree_map:
8672                                 continue
8673                         if action != "merge":
8674                                 continue
8675                         tree_type = self.pkg_tree_map[pkg_type]
8676                         mydb = trees[myroot][tree_type].dbapi
8677                         db_keys = list(self._trees_orig[myroot][
8678                                 tree_type].dbapi._aux_cache_keys)
8679                         try:
8680                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8681                         except KeyError:
8682                                 # It does not exist or it is corrupt.
8683                                 if action == "uninstall":
8684                                         continue
8685                                 raise portage.exception.PackageNotFound(pkg_key)
8686                         installed = action == "uninstall"
8687                         built = pkg_type != "ebuild"
8688                         root_config = self.roots[myroot]
8689                         pkg = Package(built=built, cpv=pkg_key,
8690                                 installed=installed, metadata=metadata,
8691                                 operation=action, root_config=root_config,
8692                                 type_name=pkg_type)
8693                         if pkg_type == "ebuild":
8694                                 pkgsettings = self.pkgsettings[myroot]
8695                                 pkgsettings.setcpv(pkg)
8696                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8697                         self._pkg_cache[pkg] = pkg
8698
8699                         root_config = self.roots[pkg.root]
8700                         if "merge" == pkg.operation and \
8701                                 not visible(root_config.settings, pkg):
8702                                 if skip_masked:
8703                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8704                                 else:
8705                                         self._unsatisfied_deps_for_display.append(
8706                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8707
8708                         fakedb[myroot].cpv_inject(pkg)
8709                         serialized_tasks.append(pkg)
8710                         self.spinner.update()
8711
8712                 if self._unsatisfied_deps_for_display:
8713                         return False
8714
8715                 if not serialized_tasks or "--nodeps" in self.myopts:
8716                         self._serialized_tasks_cache = serialized_tasks
8717                         self._scheduler_graph = self.digraph
8718                 else:
8719                         self._select_package = self._select_pkg_from_graph
8720                         self.myparams.add("selective")
8721                         # Always traverse deep dependencies in order to account for
8722                         # potentially unsatisfied dependencies of installed packages.
8723                         # This is necessary for correct --keep-going or --resume operation
8724                         # in case a package from a group of circularly dependent packages
8725                         # fails. In this case, a package which has recently been installed
8726                         # may have an unsatisfied circular dependency (pulled in by
8727                         # PDEPEND, for example). So, even though a package is already
8728                         # installed, it may not have all of its dependencies satisfied, so
8729                         # it may not be usable. If such a package is in the subgraph of
8730                         # deep dependencies of a scheduled build, that build needs to
8731                         # be cancelled. In order for this type of situation to be
8732                         # recognized, deep traversal of dependencies is required.
8733                         self.myparams.add("deep")
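                        # With "selective", packages that are already installed may
                        # satisfy dependencies without being re-merged; "deep" makes
                        # the traversal descend into the dependencies of those
                        # installed packages as well.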
8734
8735                         favorites = resume_data.get("favorites")
8736                         args_set = self._sets["args"]
8737                         if isinstance(favorites, list):
8738                                 args = self._load_favorites(favorites)
8739                         else:
8740                                 args = []
8741
8742                         for task in serialized_tasks:
8743                                 if isinstance(task, Package) and \
8744                                         task.operation == "merge":
8745                                         if not self._add_pkg(task, None):
8746                                                 return False
8747
8748                         # Packages for argument atoms need to be explicitly
8749                         # added via _add_pkg() so that they are included in the
8750                         # digraph (needed at least for --tree display).
8751                         for arg in args:
8752                                 for atom in arg.set:
8753                                         pkg, existing_node = self._select_package(
8754                                                 arg.root_config.root, atom)
8755                                         if existing_node is None and \
8756                                                 pkg is not None:
8757                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8758                                                         root=pkg.root, parent=arg)):
8759                                                         return False
8760
8761                         # Allow unsatisfied deps here to avoid showing a masking
8762                         # message for an unsatisfied dep that isn't necessarily
8763                         # masked.
8764                         if not self._create_graph(allow_unsatisfied=True):
8765                                 return False
8766
8767                         unsatisfied_deps = []
8768                         for dep in self._unsatisfied_deps:
8769                                 if not isinstance(dep.parent, Package):
8770                                         continue
8771                                 if dep.parent.operation == "merge":
8772                                         unsatisfied_deps.append(dep)
8773                                         continue
8774
8775                                 # For unsatisfied deps of installed packages, only account for
8776                                 # them if they are in the subgraph of dependencies of a package
8777                                 # which is scheduled to be installed.
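                                 # The loop below walks up the digraph's parent (reverse
                                 # dependency) edges from dep.parent and stops as soon as
                                 # it reaches a package scheduled for merge.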
8778                                 unsatisfied_install = False
8779                                 traversed = set()
8780                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8781                                 while dep_stack:
8782                                         node = dep_stack.pop()
8783                                         if not isinstance(node, Package):
8784                                                 continue
8785                                         if node.operation == "merge":
8786                                                 unsatisfied_install = True
8787                                                 break
8788                                         if node in traversed:
8789                                                 continue
8790                                         traversed.add(node)
8791                                         dep_stack.extend(self.digraph.parent_nodes(node))
8792
8793                                 if unsatisfied_install:
8794                                         unsatisfied_deps.append(dep)
8795
8796                         if masked_tasks or unsatisfied_deps:
8797                                 # This probably means that a required package
8798                                 # was dropped via --skipfirst. It makes the
8799                                 # resume list invalid, so convert it to a
8800                                 # UnsatisfiedResumeDep exception.
8801                                 raise self.UnsatisfiedResumeDep(self,
8802                                         masked_tasks + unsatisfied_deps)
8803                         self._serialized_tasks_cache = None
8804                         try:
8805                                 self.altlist()
8806                         except self._unknown_internal_error:
8807                                 return False
8808
8809                 return True
8810
8811         def _load_favorites(self, favorites):
8812                 """
8813                 Use a list of favorites to resume state from a
8814                 previous select_files() call. This creates similar
8815                 DependencyArg instances to those that would have
8816                 been created by the original select_files() call.
8817                 This allows Package instances to be matched with
8818                 DependencyArg instances during graph creation.
8819                 """
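                # For example (hypothetical input), favorites == ["world",
                # "app-editors/vim"] yields a SetArg wrapping the expanded world
                # set (bare "system"/"world" entries get SETPREFIX prepended) and
                # an AtomArg for the plain atom.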
8820                 root_config = self.roots[self.target_root]
8821                 getSetAtoms = root_config.setconfig.getSetAtoms
8822                 sets = root_config.sets
8823                 args = []
8824                 for x in favorites:
8825                         if not isinstance(x, basestring):
8826                                 continue
8827                         if x in ("system", "world"):
8828                                 x = SETPREFIX + x
8829                         if x.startswith(SETPREFIX):
8830                                 s = x[len(SETPREFIX):]
8831                                 if s not in sets:
8832                                         continue
8833                                 if s in self._sets:
8834                                         continue
8835                                 # Recursively expand sets so that containment tests in
8836                                 # self._get_parent_sets() properly match atoms in nested
8837                                 # sets (like if world contains system).
8838                                 expanded_set = InternalPackageSet(
8839                                         initial_atoms=getSetAtoms(s))
8840                                 self._sets[s] = expanded_set
8841                                 args.append(SetArg(arg=x, set=expanded_set,
8842                                         root_config=root_config))
8843                         else:
8844                                 if not portage.isvalidatom(x):
8845                                         continue
8846                                 args.append(AtomArg(arg=x, atom=x,
8847                                         root_config=root_config))
8848
8849                 self._set_args(args)
8850                 return args
8851
8852         class UnsatisfiedResumeDep(portage.exception.PortageException):
8853                 """
8854                 A dependency of a resume list is not installed. This
8855                 can occur when a required package is dropped from the
8856                 merge list via --skipfirst.
8857                 """
8858                 def __init__(self, depgraph, value):
8859                         portage.exception.PortageException.__init__(self, value)
8860                         self.depgraph = depgraph
8861
8862         class _internal_exception(portage.exception.PortageException):
8863                 def __init__(self, value=""):
8864                         portage.exception.PortageException.__init__(self, value)
8865
8866         class _unknown_internal_error(_internal_exception):
8867                 """
8868                 Used by the depgraph internally to terminate graph creation.
8869                 The specific reason for the failure should have been dumped
8870                 to stderr, unfortunately, the exact reason for the failure
8871                 to stderr; unfortunately, the exact reason for the failure
8872                 """
8873
8874         class _serialize_tasks_retry(_internal_exception):
8875                 """
8876                 This is raised by the _serialize_tasks() method when it needs to
8877                 be called again for some reason. The only case that it's currently
8878                 used for is when neglected dependencies need to be added to the
8879                 graph in order to avoid making a potentially unsafe decision.
8880                 """
8881
8882         class _dep_check_composite_db(portage.dbapi):
8883                 """
8884                 A dbapi-like interface that is optimized for use in dep_check() calls.
8885                 This is built on top of the existing depgraph package selection logic.
8886                 Some packages that have been added to the graph may be masked from this
8887                 view in order to influence the atom preference selection that occurs
8888                 via dep_check().
8889                 """
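                # match() below answers atom queries using the depgraph's own
                # package selection, and aux_get() serves metadata for the
                # packages that match() has cached in _cpv_pkg_map.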
8890                 def __init__(self, depgraph, root):
8891                         portage.dbapi.__init__(self)
8892                         self._depgraph = depgraph
8893                         self._root = root
8894                         self._match_cache = {}
8895                         self._cpv_pkg_map = {}
8896
8897                 def _clear_cache(self):
8898                         self._match_cache.clear()
8899                         self._cpv_pkg_map.clear()
8900
8901                 def match(self, atom):
8902                         ret = self._match_cache.get(atom)
8903                         if ret is not None:
8904                                 return ret[:]
8905                         orig_atom = atom
8906                         if "/" not in atom:
8907                                 atom = self._dep_expand(atom)
8908                         pkg, existing = self._depgraph._select_package(self._root, atom)
8909                         if not pkg:
8910                                 ret = []
8911                         else:
8912                                 # Return the highest available from select_package() as well as
8913                                 # any matching slots in the graph db.
8914                                 slots = set()
8915                                 slots.add(pkg.metadata["SLOT"])
8916                                 atom_cp = portage.dep_getkey(atom)
8917                                 if pkg.cp.startswith("virtual/"):
8918                                         # For new-style virtual lookahead that occurs inside
8919                                         # dep_check(), examine all slots. This is needed
8920                                         # so that newer slots will not unnecessarily be pulled in
8921                                         # when a satisfying lower slot is already installed. For
8922                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8923                                         # there's no need to pull in a newer slot to satisfy a
8924                                         # virtual/jdk dependency.
8925                                         for db, pkg_type, built, installed, db_keys in \
8926                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8927                                                 for cpv in db.match(atom):
8928                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8929                                                                 continue
8930                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8931                                 ret = []
8932                                 if self._visible(pkg):
8933                                         self._cpv_pkg_map[pkg.cpv] = pkg
8934                                         ret.append(pkg.cpv)
8935                                 slots.remove(pkg.metadata["SLOT"])
8936                                 while slots:
8937                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8938                                         pkg, existing = self._depgraph._select_package(
8939                                                 self._root, slot_atom)
8940                                         if not pkg:
8941                                                 continue
8942                                         if not self._visible(pkg):
8943                                                 continue
8944                                         self._cpv_pkg_map[pkg.cpv] = pkg
8945                                         ret.append(pkg.cpv)
8946                                 if ret:
8947                                         self._cpv_sort_ascending(ret)
8948                         self._match_cache[orig_atom] = ret
8949                         return ret[:]
8950
8951                 def _visible(self, pkg):
8952                         if pkg.installed and "selective" not in self._depgraph.myparams:
8953                                 try:
8954                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8955                                 except (StopIteration, portage.exception.InvalidDependString):
8956                                         arg = None
8957                                 if arg:
8958                                         return False
8959                         if pkg.installed:
8960                                 try:
8961                                         if not visible(
8962                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8963                                                 return False
8964                                 except portage.exception.InvalidDependString:
8965                                         pass
8966                         in_graph = self._depgraph._slot_pkg_map[
8967                                 self._root].get(pkg.slot_atom)
8968                         if in_graph is None:
8969                                 # Mask choices for packages which are not the highest visible
8970                                 # version within their slot (since they usually trigger slot
8971                                 # conflicts).
8972                                 highest_visible, in_graph = self._depgraph._select_package(
8973                                         self._root, pkg.slot_atom)
8974                                 if pkg != highest_visible:
8975                                         return False
8976                         elif in_graph != pkg:
8977                                 # Mask choices for packages that would trigger a slot
8978                                 # conflict with a previously selected package.
8979                                 return False
8980                         return True
8981
8982                 def _dep_expand(self, atom):
8983                         """
8984                         This is only needed for old installed packages that may
8985                         contain atoms that are not fully qualified with a specific
8986                         category. Emulate the cpv_expand() function that's used by
8987                         dbapi.match() in cases like this. If there are multiple
8988                         matches, it's often due to a new-style virtual that has
8989                         been added, so try to filter those out to avoid raising
8990                         a ValueError.
8991                         """
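                             # For example (hypothetical), a bare atom like ">=foo-1.2" is expanded
                             # to ">=dev-libs/foo-1.2" when exactly one non-virtual category matches;
                             # if nothing matches, it falls back to the "virtual" or "null" category
                             # handling below.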
8992                         root_config = self._depgraph.roots[self._root]
8993                         orig_atom = atom
8994                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8995                         if len(expanded_atoms) > 1:
8996                                 non_virtual_atoms = []
8997                                 for x in expanded_atoms:
8998                                         if not portage.dep_getkey(x).startswith("virtual/"):
8999                                                 non_virtual_atoms.append(x)
9000                                 if len(non_virtual_atoms) == 1:
9001                                         expanded_atoms = non_virtual_atoms
9002                         if len(expanded_atoms) > 1:
9003                                 # compatible with portage.cpv_expand()
9004                                 raise portage.exception.AmbiguousPackageName(
9005                                         [portage.dep_getkey(x) for x in expanded_atoms])
9006                         if expanded_atoms:
9007                                 atom = expanded_atoms[0]
9008                         else:
9009                                 null_atom = insert_category_into_atom(atom, "null")
9010                                 null_cp = portage.dep_getkey(null_atom)
9011                                 cat, atom_pn = portage.catsplit(null_cp)
9012                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9013                                 if virts_p:
9014                                         # Allow the resolver to choose which virtual.
9015                                         atom = insert_category_into_atom(atom, "virtual")
9016                                 else:
9017                                         atom = insert_category_into_atom(atom, "null")
9018                         return atom
9019
9020                 def aux_get(self, cpv, wants):
9021                         metadata = self._cpv_pkg_map[cpv].metadata
9022                         return [metadata.get(x, "") for x in wants]
9023
9024 class RepoDisplay(object):
9025         def __init__(self, roots):
9026                 self._shown_repos = {}
9027                 self._unknown_repo = False
9028                 repo_paths = set()
9029                 for root_config in roots.itervalues():
9030                         portdir = root_config.settings.get("PORTDIR")
9031                         if portdir:
9032                                 repo_paths.add(portdir)
9033                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9034                         if overlays:
9035                                 repo_paths.update(overlays.split())
9036                 repo_paths = list(repo_paths)
9037                 self._repo_paths = repo_paths
9038                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9039                         for repo_path in repo_paths ]
9040
9041                 # pre-allocate index for PORTDIR so that it always has index 0.
9042                 for root_config in roots.itervalues():
9043                         portdb = root_config.trees["porttree"].dbapi
9044                         portdir = portdb.porttree_root
9045                         if portdir:
9046                                 self.repoStr(portdir)
9047
9048         def repoStr(self, repo_path_real):
9049                 real_index = -1
9050                 if repo_path_real and repo_path_real in self._repo_paths_real:
9051                         real_index = self._repo_paths_real.index(repo_path_real)
9052                 if real_index == -1:
9053                         s = "?"
9054                         self._unknown_repo = True
9055                 else:
9056                         shown_repos = self._shown_repos
9057                         repo_paths = self._repo_paths
9058                         repo_path = repo_paths[real_index]
9059                         index = shown_repos.get(repo_path)
9060                         if index is None:
9061                                 index = len(shown_repos)
9062                                 shown_repos[repo_path] = index
9063                         s = str(index)
9064                 return s
9065
9066         def __str__(self):
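                     # Example output (paths are illustrative):
                     #
                     #   Portage tree and overlays:
                     #    [0] /usr/portage
                     #    [1] /usr/local/portage
                     #    [?] indicates that the source repository could not be determined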
9067                 output = []
9068                 shown_repos = self._shown_repos
9069                 unknown_repo = self._unknown_repo
9070                 if shown_repos or self._unknown_repo:
9071                         output.append("Portage tree and overlays:\n")
9072                 show_repo_paths = list(shown_repos)
9073                 for repo_path, repo_index in shown_repos.iteritems():
9074                         show_repo_paths[repo_index] = repo_path
9075                 if show_repo_paths:
9076                         for index, repo_path in enumerate(show_repo_paths):
9077                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9078                 if unknown_repo:
9079                         output.append(" "+teal("[?]") + \
9080                                 " indicates that the source repository could not be determined\n")
9081                 return "".join(output)
9082
9083 class PackageCounters(object):
9084
9085         def __init__(self):
9086                 self.upgrades   = 0
9087                 self.downgrades = 0
9088                 self.new        = 0
9089                 self.newslot    = 0
9090                 self.reinst     = 0
9091                 self.uninst     = 0
9092                 self.blocks     = 0
9093                 self.blocks_satisfied         = 0
9094                 self.totalsize  = 0
9095                 self.restrict_fetch           = 0
9096                 self.restrict_fetch_satisfied = 0
9097                 self.interactive              = 0
9098
9099         def __str__(self):
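                     # Produces a summary such as (counts and size formatting are illustrative):
                     #   Total: 4 packages (2 upgrades, 1 new, 1 reinstall), Size of downloads: 15,360 kB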
9100                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9101                 myoutput = []
9102                 details = []
9103                 myoutput.append("Total: %s package" % total_installs)
9104                 if total_installs != 1:
9105                         myoutput.append("s")
9106                 if total_installs != 0:
9107                         myoutput.append(" (")
9108                 if self.upgrades > 0:
9109                         details.append("%s upgrade" % self.upgrades)
9110                         if self.upgrades > 1:
9111                                 details[-1] += "s"
9112                 if self.downgrades > 0:
9113                         details.append("%s downgrade" % self.downgrades)
9114                         if self.downgrades > 1:
9115                                 details[-1] += "s"
9116                 if self.new > 0:
9117                         details.append("%s new" % self.new)
9118                 if self.newslot > 0:
9119                         details.append("%s in new slot" % self.newslot)
9120                         if self.newslot > 1:
9121                                 details[-1] += "s"
9122                 if self.reinst > 0:
9123                         details.append("%s reinstall" % self.reinst)
9124                         if self.reinst > 1:
9125                                 details[-1] += "s"
9126                 if self.uninst > 0:
9127                         details.append("%s uninstall" % self.uninst)
9128                         if self.uninst > 1:
9129                                 details[-1] += "s"
9130                 if self.interactive > 0:
9131                         details.append("%s %s" % (self.interactive,
9132                                 colorize("WARN", "interactive")))
9133                 myoutput.append(", ".join(details))
9134                 if total_installs != 0:
9135                         myoutput.append(")")
9136                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9137                 if self.restrict_fetch:
9138                         myoutput.append("\nFetch Restriction: %s package" % \
9139                                 self.restrict_fetch)
9140                         if self.restrict_fetch > 1:
9141                                 myoutput.append("s")
9142                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9143                         myoutput.append(bad(" (%s unsatisfied)") % \
9144                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9145                 if self.blocks > 0:
9146                         myoutput.append("\nConflict: %s block" % \
9147                                 self.blocks)
9148                         if self.blocks > 1:
9149                                 myoutput.append("s")
9150                         if self.blocks_satisfied < self.blocks:
9151                                 myoutput.append(bad(" (%s unsatisfied)") % \
9152                                         (self.blocks - self.blocks_satisfied))
9153                 return "".join(myoutput)
9154
9155 class PollSelectAdapter(PollConstants):
9156
9157         """
9158         Use select to emulate a poll object, for
9159         systems that don't support poll().
9160         """
9161
9162         def __init__(self):
9163                 self._registered = {}
9164                 self._select_args = [[], [], []]
9165
9166         def register(self, fd, *args):
9167                 """
9168                 Only POLLIN is currently supported!
9169                 """
9170                 if len(args) > 1:
9171                         raise TypeError(
9172                                 "register expected at most 2 arguments, got " + \
9173                                 repr(1 + len(args)))
9174
9175                 eventmask = PollConstants.POLLIN | \
9176                         PollConstants.POLLPRI | PollConstants.POLLOUT
9177                 if args:
9178                         eventmask = args[0]
9179
9180                 self._registered[fd] = eventmask
9181                 self._select_args = None
9182
9183         def unregister(self, fd):
9184                 self._select_args = None
9185                 del self._registered[fd]
9186
9187         def poll(self, *args):
9188                 if len(args) > 1:
9189                         raise TypeError(
9190                                 "poll expected at most 2 arguments, got " + \
9191                                 repr(1 + len(args)))
9192
9193                 timeout = None
9194                 if args:
9195                         timeout = args[0]
9196
9197                 select_args = self._select_args
9198                 if select_args is None:
9199                         select_args = [self._registered.keys(), [], []]
9200
9201                 if timeout is not None:
9202                         select_args = select_args[:]
9203                         # Translate poll() timeout args to select() timeout args:
9204                         #
9205                         #          | units        | value(s) for indefinite block
9206                         # ---------|--------------|------------------------------
9207                         #   poll   | milliseconds | omitted, negative, or None
9208                         # ---------|--------------|------------------------------
9209                         #   select | seconds      | omitted
9210                         # ---------|--------------|------------------------------
9211
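                             # select() takes seconds as a float, so e.g. poll(500) becomes a
                             # select() timeout of 0.5, while a negative or None poll() timeout
                             # maps to an indefinite select() (no timeout argument). Float
                             # division is used so sub-second timeouts are not truncated to zero.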
9212                         if timeout is not None and timeout < 0:
9213                                 timeout = None
9214                         if timeout is not None:
9215                                 select_args.append(timeout / 1000.0)
9216
9217                 select_events = select.select(*select_args)
9218                 poll_events = []
9219                 for fd in select_events[0]:
9220                         poll_events.append((fd, PollConstants.POLLIN))
9221                 return poll_events
9222
9223 class SequentialTaskQueue(SlotObject):
9224
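             # Runs queued tasks in FIFO order, at most max_jobs at a time (or with
             # no limit when max_jobs is True). Tasks are expected to provide the
             # start(), cancel(), and addExitListener() methods of AsynchronousTask.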
9225         __slots__ = ("max_jobs", "running_tasks") + \
9226                 ("_dirty", "_scheduling", "_task_queue")
9227
9228         def __init__(self, **kwargs):
9229                 SlotObject.__init__(self, **kwargs)
9230                 self._task_queue = deque()
9231                 self.running_tasks = set()
9232                 if self.max_jobs is None:
9233                         self.max_jobs = 1
9234                 self._dirty = True
9235
9236         def add(self, task):
9237                 self._task_queue.append(task)
9238                 self._dirty = True
9239
9240         def addFront(self, task):
9241                 self._task_queue.appendleft(task)
9242                 self._dirty = True
9243
9244         def schedule(self):
9245
9246                 if not self._dirty:
9247                         return False
9248
9249                 if not self:
9250                         return False
9251
9252                 if self._scheduling:
9253                         # Ignore any recursive schedule() calls triggered via
9254                         # self._task_exit().
9255                         return False
9256
9257                 self._scheduling = True
9258
9259                 task_queue = self._task_queue
9260                 running_tasks = self.running_tasks
9261                 max_jobs = self.max_jobs
9262                 state_changed = False
9263
9264                 while task_queue and \
9265                         (max_jobs is True or len(running_tasks) < max_jobs):
9266                         task = task_queue.popleft()
9267                         cancelled = getattr(task, "cancelled", None)
9268                         if not cancelled:
9269                                 running_tasks.add(task)
9270                                 task.addExitListener(self._task_exit)
9271                                 task.start()
9272                         state_changed = True
9273
9274                 self._dirty = False
9275                 self._scheduling = False
9276
9277                 return state_changed
9278
9279         def _task_exit(self, task):
9280                 """
9281                 Since we can always rely on exit listeners being called, the set of
9282                 running tasks is always pruned automatically and there is never any need
9283                 to actively prune it.
9284                 """
9285                 self.running_tasks.remove(task)
9286                 if self._task_queue:
9287                         self._dirty = True
9288
9289         def clear(self):
9290                 self._task_queue.clear()
9291                 running_tasks = self.running_tasks
9292                 while running_tasks:
9293                         task = running_tasks.pop()
9294                         task.removeExitListener(self._task_exit)
9295                         task.cancel()
9296                 self._dirty = False
9297
9298         def __nonzero__(self):
9299                 return bool(self._task_queue or self.running_tasks)
9300
9301         def __len__(self):
9302                 return len(self._task_queue) + len(self.running_tasks)
9303
9304 _can_poll_device = None
9305
9306 def can_poll_device():
9307         """
9308         Test if it's possible to use poll() on a device such as a pty. This
9309         is known to fail on Darwin.
9310         @rtype: bool
9311         @returns: True if poll() on a device succeeds, False otherwise.
9312         """
9313
9314         global _can_poll_device
9315         if _can_poll_device is not None:
9316                 return _can_poll_device
9317
9318         if not hasattr(select, "poll"):
9319                 _can_poll_device = False
9320                 return _can_poll_device
9321
9322         try:
9323                 dev_null = open('/dev/null', 'rb')
9324         except IOError:
9325                 _can_poll_device = False
9326                 return _can_poll_device
9327
9328         p = select.poll()
9329         p.register(dev_null.fileno(), PollConstants.POLLIN)
9330
9331         invalid_request = False
9332         for f, event in p.poll():
9333                 if event & PollConstants.POLLNVAL:
9334                         invalid_request = True
9335                         break
9336         dev_null.close()
9337
9338         _can_poll_device = not invalid_request
9339         return _can_poll_device
9340
9341 def create_poll_instance():
9342         """
9343         Create an instance of select.poll, or an instance of
9344         PollSelectAdapter there is no poll() implementation or
9345         PollSelectAdapter if there is no poll() implementation or
9346         """
9347         if can_poll_device():
9348                 return select.poll()
9349         return PollSelectAdapter()
9350
9351 getloadavg = getattr(os, "getloadavg", None)
9352 if getloadavg is None:
9353         def getloadavg():
9354                 """
9355                 Uses /proc/loadavg to emulate os.getloadavg().
9356                 Raises OSError if the load average was unobtainable.
9357                 """
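                     # /proc/loadavg looks like "0.42 0.36 0.30 1/123 4567"; only the
                     # first three fields are used, e.g. (0.42, 0.36, 0.30) is returned.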
9358                 try:
9359                         loadavg_str = open('/proc/loadavg').readline()
9360                 except IOError:
9361                         # getloadavg() is only supposed to raise OSError, so convert
9362                         raise OSError('unknown')
9363                 loadavg_split = loadavg_str.split()
9364                 if len(loadavg_split) < 3:
9365                         raise OSError('unknown')
9366                 loadavg_floats = []
9367                 for i in xrange(3):
9368                         try:
9369                                 loadavg_floats.append(float(loadavg_split[i]))
9370                         except ValueError:
9371                                 raise OSError('unknown')
9372                 return tuple(loadavg_floats)
9373
9374 class PollScheduler(object):
9375
9376         class _sched_iface_class(SlotObject):
9377                 __slots__ = ("register", "schedule", "unregister")
9378
9379         def __init__(self):
9380                 self._max_jobs = 1
9381                 self._max_load = None
9382                 self._jobs = 0
9383                 self._poll_event_queue = []
9384                 self._poll_event_handlers = {}
9385                 self._poll_event_handler_ids = {}
9386                 # Increment id for each new handler.
9387                 self._event_handler_id = 0
9388                 self._poll_obj = create_poll_instance()
9389                 self._scheduling = False
9390
9391         def _schedule(self):
9392                 """
9393                 Calls _schedule_tasks() and automatically returns early from
9394                 any recursive calls to this method that the _schedule_tasks()
9395                 call might trigger. This makes _schedule() safe to call from
9396                 inside exit listeners.
9397                 """
9398                 if self._scheduling:
9399                         return False
9400                 self._scheduling = True
9401                 try:
9402                         return self._schedule_tasks()
9403                 finally:
9404                         self._scheduling = False
9405
9406         def _running_job_count(self):
9407                 return self._jobs
9408
9409         def _can_add_job(self):
9410                 max_jobs = self._max_jobs
9411                 max_load = self._max_load
9412
9413                 if self._max_jobs is not True and \
9414                         self._running_job_count() >= self._max_jobs:
9415                         return False
9416
9417                 if max_load is not None and \
9418                         (max_jobs is True or max_jobs > 1) and \
9419                         self._running_job_count() >= 1:
9420                         try:
9421                                 avg1, avg5, avg15 = getloadavg()
9422                         except OSError:
9423                                 return False
9424
9425                         if avg1 >= max_load:
9426                                 return False
9427
9428                 return True
9429
9430         def _poll(self, timeout=None):
9431                 """
9432                 All poll() calls pass through here. The poll events
9433                 are added directly to self._poll_event_queue.
9434                 In order to avoid endless blocking, this raises
9435                 StopIteration if timeout is None and there are
9436                 no file descriptors to poll.
9437                 """
9438                 if not self._poll_event_handlers:
9439                         self._schedule()
9440                         if timeout is None and \
9441                                 not self._poll_event_handlers:
9442                                 raise StopIteration(
9443                                         "timeout is None and there are no poll() event handlers")
9444
9445                 # The following error is known to occur with Linux kernel versions
9446                 # less than 2.6.24:
9447                 #
9448                 #   select.error: (4, 'Interrupted system call')
9449                 #
9450                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9451                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9452                 # without any events.
9453                 while True:
9454                         try:
9455                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9456                                 break
9457                         except select.error, e:
9458                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9459                                         level=logging.ERROR, noiselevel=-1)
9460                                 del e
9461                                 if timeout is not None:
9462                                         break
9463
9464         def _next_poll_event(self, timeout=None):
9465                 """
9466                 Since the _schedule_wait() loop is called by event
9467                 handlers from _poll_loop(), maintain a central event
9468                 queue for both of them to share events from a single
9469                 poll() call. In order to avoid endless blocking, this
9470                 raises StopIteration if timeout is None and there are
9471                 no file descriptors to poll.
9472                 """
9473                 if not self._poll_event_queue:
9474                         self._poll(timeout)
9475                 return self._poll_event_queue.pop()
9476
9477         def _poll_loop(self):
9478
9479                 event_handlers = self._poll_event_handlers
9480                 event_handled = False
9481
9482                 try:
9483                         while event_handlers:
9484                                 f, event = self._next_poll_event()
9485                                 handler, reg_id = event_handlers[f]
9486                                 handler(f, event)
9487                                 event_handled = True
9488                 except StopIteration:
9489                         event_handled = True
9490
9491                 if not event_handled:
9492                         raise AssertionError("tight loop")
9493
9494         def _schedule_yield(self):
9495                 """
9496                 Schedule for a short period of time chosen by the scheduler based
9497                 on internal state. Synchronous tasks should call this periodically
9498                 in order to allow the scheduler to service pending poll events. The
9499                 scheduler will call poll() exactly once, without blocking, and any
9500                 resulting poll events will be serviced.
9501                 """
9502                 event_handlers = self._poll_event_handlers
9503                 events_handled = 0
9504
9505                 if not event_handlers:
9506                         return bool(events_handled)
9507
9508                 if not self._poll_event_queue:
9509                         self._poll(0)
9510
9511                 try:
9512                         while event_handlers and self._poll_event_queue:
9513                                 f, event = self._next_poll_event()
9514                                 handler, reg_id = event_handlers[f]
9515                                 handler(f, event)
9516                                 events_handled += 1
9517                 except StopIteration:
9518                         events_handled += 1
9519
9520                 return bool(events_handled)
9521
9522         def _register(self, f, eventmask, handler):
9523                 """
9524                 @rtype: Integer
9525                 @return: A unique registration id, for use in schedule() or
9526                         unregister() calls.
9527                 """
9528                 if f in self._poll_event_handlers:
9529                         raise AssertionError("fd %d is already registered" % f)
9530                 self._event_handler_id += 1
9531                 reg_id = self._event_handler_id
9532                 self._poll_event_handler_ids[reg_id] = f
9533                 self._poll_event_handlers[f] = (handler, reg_id)
9534                 self._poll_obj.register(f, eventmask)
9535                 return reg_id
9536
9537         def _unregister(self, reg_id):
9538                 f = self._poll_event_handler_ids[reg_id]
9539                 self._poll_obj.unregister(f)
9540                 del self._poll_event_handlers[f]
9541                 del self._poll_event_handler_ids[reg_id]
9542
9543         def _schedule_wait(self, wait_ids):
9544                 """
9545                 Schedule until none of the wait_ids remain registered
9546                 for poll() events.
9547                 @type wait_ids: int or frozenset of ints
9548                 @param wait_ids: the registration id(s) to wait for
9549                 """
9550                 event_handlers = self._poll_event_handlers
9551                 handler_ids = self._poll_event_handler_ids
9552                 event_handled = False
9553
9554                 if isinstance(wait_ids, int):
9555                         wait_ids = frozenset([wait_ids])
9556
9557                 try:
9558                         while wait_ids.intersection(handler_ids):
9559                                 f, event = self._next_poll_event()
9560                                 handler, reg_id = event_handlers[f]
9561                                 handler(f, event)
9562                                 event_handled = True
9563                 except StopIteration:
9564                         event_handled = True
9565
9566                 return event_handled
9567
9568 class QueueScheduler(PollScheduler):
9569
9570         """
9571         Add instances of SequentialTaskQueue and then call run(). The
9572         run() method returns when no tasks remain.
9573         """
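             # Rough usage sketch (task objects and names are hypothetical):
             #
             #   scheduler = QueueScheduler(max_jobs=2)
             #   queue = SequentialTaskQueue(max_jobs=2)
             #   scheduler.add(queue)
             #   queue.add(some_async_task)
             #   scheduler.run()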
9574
9575         def __init__(self, max_jobs=None, max_load=None):
9576                 PollScheduler.__init__(self)
9577
9578                 if max_jobs is None:
9579                         max_jobs = 1
9580
9581                 self._max_jobs = max_jobs
9582                 self._max_load = max_load
9583                 self.sched_iface = self._sched_iface_class(
9584                         register=self._register,
9585                         schedule=self._schedule_wait,
9586                         unregister=self._unregister)
9587
9588                 self._queues = []
9589                 self._schedule_listeners = []
9590
9591         def add(self, q):
9592                 self._queues.append(q)
9593
9594         def remove(self, q):
9595                 self._queues.remove(q)
9596
9597         def run(self):
9598
9599                 while self._schedule():
9600                         self._poll_loop()
9601
9602                 while self._running_job_count():
9603                         self._poll_loop()
9604
9605         def _schedule_tasks(self):
9606                 """
9607                 @rtype: bool
9608                 @returns: True if there may be remaining tasks to schedule,
9609                         False otherwise.
9610                 """
9611                 while self._can_add_job():
9612                         n = self._max_jobs - self._running_job_count()
9613                         if n < 1:
9614                                 break
9615
9616                         if not self._start_next_job(n):
9617                                 return False
9618
9619                 for q in self._queues:
9620                         if q:
9621                                 return True
9622                 return False
9623
9624         def _running_job_count(self):
9625                 job_count = 0
9626                 for q in self._queues:
9627                         job_count += len(q.running_tasks)
9628                 self._jobs = job_count
9629                 return job_count
9630
9631         def _start_next_job(self, n=1):
9632                 started_count = 0
9633                 for q in self._queues:
9634                         initial_job_count = len(q.running_tasks)
9635                         q.schedule()
9636                         final_job_count = len(q.running_tasks)
9637                         if final_job_count > initial_job_count:
9638                                 started_count += (final_job_count - initial_job_count)
9639                         if started_count >= n:
9640                                 break
9641                 return started_count
9642
9643 class TaskScheduler(object):
9644
9645         """
9646         A simple way to handle scheduling of AsynchronousTask instances. Simply
9647         add tasks and call run(). The run() method returns when no tasks remain.
9648         """
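             # E.g. (hypothetical task): ts = TaskScheduler(max_jobs=2); ts.add(task); ts.run()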
9649
9650         def __init__(self, max_jobs=None, max_load=None):
9651                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9652                 self._scheduler = QueueScheduler(
9653                         max_jobs=max_jobs, max_load=max_load)
9654                 self.sched_iface = self._scheduler.sched_iface
9655                 self.run = self._scheduler.run
9656                 self._scheduler.add(self._queue)
9657
9658         def add(self, task):
9659                 self._queue.add(task)
9660
9661 class JobStatusDisplay(object):
9662
9663         _bound_properties = ("curval", "failed", "running")
9664         _jobs_column_width = 48
9665
9666         # Don't update the display unless at least this much
9667         # time has passed, in units of seconds.
9668         _min_display_latency = 2
9669
9670         _default_term_codes = {
9671                 'cr'  : '\r',
9672                 'el'  : '\x1b[K',
9673                 'nel' : '\n',
9674         }
9675
9676         _termcap_name_map = {
9677                 'carriage_return' : 'cr',
9678                 'clr_eol'         : 'el',
9679                 'newline'         : 'nel',
9680         }
9681
9682         def __init__(self, out=sys.stdout, quiet=False):
9683                 object.__setattr__(self, "out", out)
9684                 object.__setattr__(self, "quiet", quiet)
9685                 object.__setattr__(self, "maxval", 0)
9686                 object.__setattr__(self, "merges", 0)
9687                 object.__setattr__(self, "_changed", False)
9688                 object.__setattr__(self, "_displayed", False)
9689                 object.__setattr__(self, "_last_display_time", 0)
9690                 object.__setattr__(self, "width", 80)
9691                 self.reset()
9692
9693                 isatty = hasattr(out, "isatty") and out.isatty()
9694                 object.__setattr__(self, "_isatty", isatty)
9695                 if not isatty or not self._init_term():
9696                         term_codes = {}
9697                         for k, capname in self._termcap_name_map.iteritems():
9698                                 term_codes[k] = self._default_term_codes[capname]
9699                         object.__setattr__(self, "_term_codes", term_codes)
9700                 encoding = sys.getdefaultencoding()
9701                 for k, v in self._term_codes.items():
9702                         if not isinstance(v, str):
9703                                 self._term_codes[k] = v.decode(encoding, 'replace')
9704
9705         def _init_term(self):
9706                 """
9707                 Initialize term control codes.
9708                 @rtype: bool
9709                 @returns: True if term codes were successfully initialized,
9710                         False otherwise.
9711                 """
9712
9713                 term_type = os.environ.get("TERM", "vt100")
9714                 tigetstr = None
9715
9716                 try:
9717                         import curses
9718                         try:
9719                                 curses.setupterm(term_type, self.out.fileno())
9720                                 tigetstr = curses.tigetstr
9721                         except curses.error:
9722                                 pass
9723                 except ImportError:
9724                         pass
9725
9726                 if tigetstr is None:
9727                         return False
9728
9729                 term_codes = {}
9730                 for k, capname in self._termcap_name_map.iteritems():
9731                         code = tigetstr(capname)
9732                         if code is None:
9733                                 code = self._default_term_codes[capname]
9734                         term_codes[k] = code
9735                 object.__setattr__(self, "_term_codes", term_codes)
9736                 return True
9737
9738         def _format_msg(self, msg):
9739                 return ">>> %s" % msg
9740
9741         def _erase(self):
9742                 self.out.write(
9743                         self._term_codes['carriage_return'] + \
9744                         self._term_codes['clr_eol'])
9745                 self.out.flush()
9746                 self._displayed = False
9747
9748         def _display(self, line):
9749                 self.out.write(line)
9750                 self.out.flush()
9751                 self._displayed = True
9752
9753         def _update(self, msg):
9754
9755                 out = self.out
9756                 if not self._isatty:
9757                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9758                         self.out.flush()
9759                         self._displayed = True
9760                         return
9761
9762                 if self._displayed:
9763                         self._erase()
9764
9765                 self._display(self._format_msg(msg))
9766
9767         def displayMessage(self, msg):
9768
9769                 was_displayed = self._displayed
9770
9771                 if self._isatty and self._displayed:
9772                         self._erase()
9773
9774                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9775                 self.out.flush()
9776                 self._displayed = False
9777
9778                 if was_displayed:
9779                         self._changed = True
9780                         self.display()
9781
9782         def reset(self):
9783                 self.maxval = 0
9784                 self.merges = 0
9785                 for name in self._bound_properties:
9786                         object.__setattr__(self, name, 0)
9787
9788                 if self._displayed:
9789                         self.out.write(self._term_codes['newline'])
9790                         self.out.flush()
9791                         self._displayed = False
9792
9793         def __setattr__(self, name, value):
9794                 old_value = getattr(self, name)
9795                 if value == old_value:
9796                         return
9797                 object.__setattr__(self, name, value)
9798                 if name in self._bound_properties:
9799                         self._property_change(name, old_value, value)
9800
9801         def _property_change(self, name, old_value, new_value):
9802                 self._changed = True
9803                 self.display()
9804
9805         def _load_avg_str(self):
9806                 try:
9807                         avg = getloadavg()
9808                 except OSError:
9809                         return 'unknown'
9810
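                     # Vary the precision with the magnitude so the string stays compact:
                     # two decimals below 10, one below 100, none above (e.g. 0.85, 12.3, 123).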
9811                 max_avg = max(avg)
9812
9813                 if max_avg < 10:
9814                         digits = 2
9815                 elif max_avg < 100:
9816                         digits = 1
9817                 else:
9818                         digits = 0
9819
9820                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9821
9822         def display(self):
9823                 """
9824                 Display status on stdout, but only if something has
9825                 changed since the last call.
9826                 """
9827
9828                 if self.quiet:
9829                         return
9830
9831                 current_time = time.time()
9832                 time_delta = current_time - self._last_display_time
9833                 if self._displayed and \
9834                         not self._changed:
9835                         if not self._isatty:
9836                                 return
9837                         if time_delta < self._min_display_latency:
9838                                 return
9839
9840                 self._last_display_time = current_time
9841                 self._changed = False
9842                 self._display_status()
9843
9844         def _display_status(self):
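                     # Renders a status line such as (values illustrative):
                     #   Jobs: 3 of 10 complete, 2 running, 1 failed        Load avg: 0.85, 0.72, 0.64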
9845                 # Don't use len(self._completed_tasks) here since that also
9846                 # can include uninstall tasks.
9847                 curval_str = str(self.curval)
9848                 maxval_str = str(self.maxval)
9849                 running_str = str(self.running)
9850                 failed_str = str(self.failed)
9851                 load_avg_str = self._load_avg_str()
9852
9853                 color_output = StringIO()
9854                 plain_output = StringIO()
9855                 style_file = portage.output.ConsoleStyleFile(color_output)
9856                 style_file.write_listener = plain_output
9857                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9858                 style_writer.style_listener = style_file.new_styles
9859                 f = formatter.AbstractFormatter(style_writer)
9860
9861                 number_style = "INFORM"
9862                 f.add_literal_data("Jobs: ")
9863                 f.push_style(number_style)
9864                 f.add_literal_data(curval_str)
9865                 f.pop_style()
9866                 f.add_literal_data(" of ")
9867                 f.push_style(number_style)
9868                 f.add_literal_data(maxval_str)
9869                 f.pop_style()
9870                 f.add_literal_data(" complete")
9871
9872                 if self.running:
9873                         f.add_literal_data(", ")
9874                         f.push_style(number_style)
9875                         f.add_literal_data(running_str)
9876                         f.pop_style()
9877                         f.add_literal_data(" running")
9878
9879                 if self.failed:
9880                         f.add_literal_data(", ")
9881                         f.push_style(number_style)
9882                         f.add_literal_data(failed_str)
9883                         f.pop_style()
9884                         f.add_literal_data(" failed")
9885
9886                 padding = self._jobs_column_width - len(plain_output.getvalue())
9887                 if padding > 0:
9888                         f.add_literal_data(padding * " ")
9889
9890                 f.add_literal_data("Load avg: ")
9891                 f.add_literal_data(load_avg_str)
9892
9893                 # Truncate to fit width, to avoid making the terminal scroll if the
9894                 # line overflows (happens when the load average is large).
9895                 plain_output = plain_output.getvalue()
9896                 if self._isatty and len(plain_output) > self.width:
9897                         # Use plain_output here since it's easier to truncate
9898                         # properly than the color output which contains console
9899                         # color codes.
9900                         self._update(plain_output[:self.width])
9901                 else:
9902                         self._update(color_output.getvalue())
9903
9904                 xtermTitle(" ".join(plain_output.split()))
9905
9906 class Scheduler(PollScheduler):
9907
9908         _opts_ignore_blockers = \
9909                 frozenset(["--buildpkgonly",
9910                 "--fetchonly", "--fetch-all-uri",
9911                 "--nodeps", "--pretend"])
9912
9913         _opts_no_background = \
9914                 frozenset(["--pretend",
9915                 "--fetchonly", "--fetch-all-uri"])
9916
9917         _opts_no_restart = frozenset(["--buildpkgonly",
9918                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9919
9920         _bad_resume_opts = set(["--ask", "--changelog",
9921                 "--resume", "--skipfirst"])
9922
9923         _fetch_log = "/var/log/emerge-fetch.log"
9924
9925         class _iface_class(SlotObject):
9926                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9927                         "dblinkElog", "fetch", "register", "schedule",
9928                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9929                         "unregister")
9930
9931         class _fetch_iface_class(SlotObject):
9932                 __slots__ = ("log_file", "schedule")
9933
9934         _task_queues_class = slot_dict_class(
9935                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9936
9937         class _build_opts_class(SlotObject):
9938                 __slots__ = ("buildpkg", "buildpkgonly",
9939                         "fetch_all_uri", "fetchonly", "pretend")
9940
9941         class _binpkg_opts_class(SlotObject):
9942                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9943
9944         class _pkg_count_class(SlotObject):
9945                 __slots__ = ("curval", "maxval")
9946
9947         class _emerge_log_class(SlotObject):
9948                 __slots__ = ("xterm_titles",)
9949
9950                 def log(self, *pargs, **kwargs):
9951                         if not self.xterm_titles:
9952                                 # Avoid interference with the scheduler's status display.
9953                                 kwargs.pop("short_msg", None)
9954                         emergelog(self.xterm_titles, *pargs, **kwargs)
9955
9956         class _failed_pkg(SlotObject):
9957                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9958
9959         class _ConfigPool(object):
9960                 """Interface for a task to temporarily allocate a config
9961                 instance from a pool. This allows a task to be constructed
9962                 long before the config instance actually becomes needed, like
9963                 when prefetchers are constructed for the whole merge list."""
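                     # Typical (hypothetical) use by a task:
                     #   settings = config_pool.allocate()
                     #   try:
                     #           ... use settings ...
                     #   finally:
                     #           config_pool.deallocate(settings)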
9964                 __slots__ = ("_root", "_allocate", "_deallocate")
9965                 def __init__(self, root, allocate, deallocate):
9966                         self._root = root
9967                         self._allocate = allocate
9968                         self._deallocate = deallocate
9969                 def allocate(self):
9970                         return self._allocate(self._root)
9971                 def deallocate(self, settings):
9972                         self._deallocate(settings)
9973
9974         class _unknown_internal_error(portage.exception.PortageException):
9975                 """
9976                 Used internally to terminate scheduling. The specific reason for
9977                 the failure should have been dumped to stderr.
9978                 """
9979                 def __init__(self, value=""):
9980                         portage.exception.PortageException.__init__(self, value)
9981
9982         def __init__(self, settings, trees, mtimedb, myopts,
9983                 spinner, mergelist, favorites, digraph):
9984                 PollScheduler.__init__(self)
9985                 self.settings = settings
9986                 self.target_root = settings["ROOT"]
9987                 self.trees = trees
9988                 self.myopts = myopts
9989                 self._spinner = spinner
9990                 self._mtimedb = mtimedb
9991                 self._mergelist = mergelist
9992                 self._favorites = favorites
9993                 self._args_set = InternalPackageSet(favorites)
9994                 self._build_opts = self._build_opts_class()
9995                 for k in self._build_opts.__slots__:
9996                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9997                 self._binpkg_opts = self._binpkg_opts_class()
9998                 for k in self._binpkg_opts.__slots__:
9999                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10000
10001                 self.curval = 0
10002                 self._logger = self._emerge_log_class()
10003                 self._task_queues = self._task_queues_class()
10004                 for k in self._task_queues.allowed_keys:
10005                         setattr(self._task_queues, k,
10006                                 SequentialTaskQueue())
10007
10008                 # Holds merges that will wait to be executed when no builds are
10009                 # executing. This is useful for system packages since dependencies
10010                 # on system packages are frequently unspecified.
10011                 self._merge_wait_queue = []
10012                 # Holds merges that have been transferred from the merge_wait_queue to
10013                 # the actual merge queue. They are removed from this list upon
10014                 # completion. Other packages can start building only when this list is
10015                 # empty.
10016                 self._merge_wait_scheduled = []
10017
10018                 # Holds system packages and their deep runtime dependencies. Before
10019                 # being merged, these packages go to merge_wait_queue, to be merged
10020                 # when no other packages are building.
10021                 self._deep_system_deps = set()
10022
10023                 self._status_display = JobStatusDisplay()
10024                 self._max_load = myopts.get("--load-average")
10025                 max_jobs = myopts.get("--jobs")
10026                 if max_jobs is None:
10027                         max_jobs = 1
10028                 self._set_max_jobs(max_jobs)
10029
10030                 # The root where the currently running
10031                 # portage instance is installed.
10032                 self._running_root = trees["/"]["root_config"]
10033                 self.edebug = 0
10034                 if settings.get("PORTAGE_DEBUG", "") == "1":
10035                         self.edebug = 1
10036                 self.pkgsettings = {}
10037                 self._config_pool = {}
10038                 self._blocker_db = {}
10039                 for root in trees:
10040                         self._config_pool[root] = []
10041                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10042
10043                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10044                         schedule=self._schedule_fetch)
10045                 self._sched_iface = self._iface_class(
10046                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10047                         dblinkDisplayMerge=self._dblink_display_merge,
10048                         dblinkElog=self._dblink_elog,
10049                         fetch=fetch_iface, register=self._register,
10050                         schedule=self._schedule_wait,
10051                         scheduleSetup=self._schedule_setup,
10052                         scheduleUnpack=self._schedule_unpack,
10053                         scheduleYield=self._schedule_yield,
10054                         unregister=self._unregister)
10055
10056                 self._prefetchers = weakref.WeakValueDictionary()
10057                 self._pkg_queue = []
10058                 self._completed_tasks = set()
10059
10060                 self._failed_pkgs = []
10061                 self._failed_pkgs_all = []
10062                 self._failed_pkgs_die_msgs = []
10063                 self._post_mod_echo_msgs = []
10064                 self._parallel_fetch = False
10065                 merge_count = len([x for x in mergelist \
10066                         if isinstance(x, Package) and x.operation == "merge"])
10067                 self._pkg_count = self._pkg_count_class(
10068                         curval=0, maxval=merge_count)
10069                 self._status_display.maxval = self._pkg_count.maxval
10070
10071                 # The load average takes some time to respond when new
10072                 # jobs are added, so we need to limit the rate of adding
10073                 # new jobs.
10074                 self._job_delay_max = 10
10075                 self._job_delay_factor = 1.0
10076                 self._job_delay_exp = 1.5
10077                 self._previous_job_start_time = None
10078
10079                 self._set_digraph(digraph)
10080
10081                 # This is used to memoize the _choose_pkg() result when
10082                 # no packages can be chosen until one of the existing
10083                 # jobs completes.
10084                 self._choose_pkg_return_early = False
10085
10086                 features = self.settings.features
10087                 if "parallel-fetch" in features and \
10088                         not ("--pretend" in self.myopts or \
10089                         "--fetch-all-uri" in self.myopts or \
10090                         "--fetchonly" in self.myopts):
10091                         if "distlocks" not in features:
10092                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10093                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10094                                         "requires the distlocks feature enabled"+"\n",
10095                                         noiselevel=-1)
10096                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10097                                         "thus parallel-fetching is being disabled"+"\n",
10098                                         noiselevel=-1)
10099                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10100                         elif len(mergelist) > 1:
10101                                 self._parallel_fetch = True
10102
10103                 if self._parallel_fetch:
10104                                 # clear out existing fetch log if it exists
10105                                 try:
10106                                         open(self._fetch_log, 'w')
10107                                 except EnvironmentError:
10108                                         pass
10109
10110                 self._running_portage = None
10111                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10112                         portage.const.PORTAGE_PACKAGE_ATOM)
10113                 if portage_match:
10114                         cpv = portage_match.pop()
10115                         self._running_portage = self._pkg(cpv, "installed",
10116                                 self._running_root, installed=True)
10117
10118         def _poll(self, timeout=None):
10119                 self._schedule()
10120                 PollScheduler._poll(self, timeout=timeout)
10121
10122         def _set_max_jobs(self, max_jobs):
10123                 self._max_jobs = max_jobs
10124                 self._task_queues.jobs.max_jobs = max_jobs
10125
10126         def _background_mode(self):
10127                 """
10128                 Check if background mode is enabled and adjust states as necessary.
10129
10130                 @rtype: bool
10131                 @returns: True if background mode is enabled, False otherwise.
10132                 """
10133                 background = (self._max_jobs is True or \
10134                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10135                         not bool(self._opts_no_background.intersection(self.myopts))
10136
10137                 if background:
10138                         interactive_tasks = self._get_interactive_tasks()
10139                         if interactive_tasks:
10140                                 background = False
10141                                 writemsg_level(">>> Sending package output to stdio due " + \
10142                                         "to interactive package(s):\n",
10143                                         level=logging.INFO, noiselevel=-1)
10144                                 msg = [""]
10145                                 for pkg in interactive_tasks:
10146                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10147                                         if pkg.root != "/":
10148                                                 pkg_str += " for " + pkg.root
10149                                         msg.append(pkg_str)
10150                                 msg.append("")
10151                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10152                                         level=logging.INFO, noiselevel=-1)
10153                                 if self._max_jobs is True or self._max_jobs > 1:
10154                                         self._set_max_jobs(1)
10155                                         writemsg_level(">>> Setting --jobs=1 due " + \
10156                                                 "to the above interactive package(s)\n",
10157                                                 level=logging.INFO, noiselevel=-1)
10158
10159                 self._status_display.quiet = \
10160                         not background or \
10161                         ("--quiet" in self.myopts and \
10162                         "--verbose" not in self.myopts)
10163
10164                 self._logger.xterm_titles = \
10165                         "notitles" not in self.settings.features and \
10166                         self._status_display.quiet
10167
10168                 return background
10169
10170         def _get_interactive_tasks(self):
10171                 from portage import flatten
10172                 from portage.dep import use_reduce, paren_reduce
10173                 interactive_tasks = []
10174                 for task in self._mergelist:
10175                         if not (isinstance(task, Package) and \
10176                                 task.operation == "merge"):
10177                                 continue
10178                         try:
10179                                 properties = flatten(use_reduce(paren_reduce(
10180                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10181                         except portage.exception.InvalidDependString, e:
10182                                 show_invalid_depstring_notice(task,
10183                                         task.metadata["PROPERTIES"], str(e))
10184                                 raise self._unknown_internal_error()
10185                         if "interactive" in properties:
10186                                 interactive_tasks.append(task)
10187                 return interactive_tasks
10188
10189         def _set_digraph(self, digraph):
10190                 if "--nodeps" in self.myopts or \
10191                         (self._max_jobs is not True and self._max_jobs < 2):
10192                         # save some memory
10193                         self._digraph = None
10194                         return
10195
10196                 self._digraph = digraph
10197                 self._find_system_deps()
10198                 self._prune_digraph()
10199                 self._prevent_builddir_collisions()
10200
10201         def _find_system_deps(self):
10202                 """
10203                 Find system packages and their deep runtime dependencies. Before being
10204                 merged, these packages go to merge_wait_queue, to be merged when no
10205                 other packages are building.
10206                 """
10207                 deep_system_deps = self._deep_system_deps
10208                 deep_system_deps.clear()
10209                 deep_system_deps.update(
10210                         _find_deep_system_runtime_deps(self._digraph))
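                      # Only packages that are actually scheduled to be merged need to wait, so drop everything else.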
10211                 deep_system_deps.difference_update([pkg for pkg in \
10212                         deep_system_deps if pkg.operation != "merge"])
10213
10214         def _prune_digraph(self):
10215                 """
10216                 Prune any root nodes that are irrelevant.
10217                 """
10218
10219                 graph = self._digraph
10220                 completed_tasks = self._completed_tasks
10221                 removed_nodes = set()
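                      # Removing root nodes can expose new irrelevant roots, so repeat until a pass removes nothing.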
10222                 while True:
10223                         for node in graph.root_nodes():
10224                                 if not isinstance(node, Package) or \
10225                                         (node.installed and node.operation == "nomerge") or \
10226                                         node.onlydeps or \
10227                                         node in completed_tasks:
10228                                         removed_nodes.add(node)
10229                         if removed_nodes:
10230                                 graph.difference_update(removed_nodes)
10231                         if not removed_nodes:
10232                                 break
10233                         removed_nodes.clear()
10234
10235         def _prevent_builddir_collisions(self):
10236                 """
10237                 When building stages, sometimes the same exact cpv needs to be merged
10238                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10239                 in the builddir. Currently, normal file locks would be inappropriate
10240                 for this purpose since emerge holds all of its build dir locks from
10241                 the main process.
10242                 """
10243                 cpv_map = {}
10244                 for pkg in self._mergelist:
10245                         if not isinstance(pkg, Package):
10246                                 # a satisfied blocker
10247                                 continue
10248                         if pkg.installed:
10249                                 continue
10250                         if pkg.cpv not in cpv_map:
10251                                 cpv_map[pkg.cpv] = [pkg]
10252                                 continue
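                              # Give each later instance of this cpv a buildtime dependency on the
                              # earlier ones, so they never occupy the same builddir at the same time.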
10253                         for earlier_pkg in cpv_map[pkg.cpv]:
10254                                 self._digraph.add(earlier_pkg, pkg,
10255                                         priority=DepPriority(buildtime=True))
10256                         cpv_map[pkg.cpv].append(pkg)
10257
10258         class _pkg_failure(portage.exception.PortageException):
10259                 """
10260                 An instance of this class is raised by unmerge() when
10261                 an uninstallation fails.
10262                 """
10263                 status = 1
10264                 def __init__(self, *pargs):
10265                         portage.exception.PortageException.__init__(self, pargs)
10266                         if pargs:
10267                                 self.status = pargs[0]
10268
10269         def _schedule_fetch(self, fetcher):
10270                 """
10271                 Schedule a fetcher on the fetch queue, in order to
10272                 serialize access to the fetch log.
10273                 """
10274                 self._task_queues.fetch.addFront(fetcher)
10275
10276         def _schedule_setup(self, setup_phase):
10277                 """
10278                 Schedule a setup phase on the merge queue, in order to
10279                 serialize unsandboxed access to the live filesystem.
10280                 """
10281                 self._task_queues.merge.addFront(setup_phase)
10282                 self._schedule()
10283
10284         def _schedule_unpack(self, unpack_phase):
10285                 """
10286                 Schedule an unpack phase on the unpack queue, in order
10287                 to serialize $DISTDIR access for live ebuilds.
10288                 """
10289                 self._task_queues.unpack.add(unpack_phase)
10290
10291         def _find_blockers(self, new_pkg):
10292                 """
10293                 Returns a callable which should be called only when
10294                 the vdb lock has been acquired.
10295                 """
10296                 def get_blockers():
10297                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10298                 return get_blockers
10299
10300         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10301                 if self._opts_ignore_blockers.intersection(self.myopts):
10302                         return None
10303
10304                 # Call gc.collect() here to avoid heap overflow that
10305                 # triggers 'Cannot allocate memory' errors (reported
10306                 # with python-2.5).
10307                 import gc
10308                 gc.collect()
10309
10310                 blocker_db = self._blocker_db[new_pkg.root]
10311
10312                 blocker_dblinks = []
10313                 for blocking_pkg in blocker_db.findInstalledBlockers(
10314                         new_pkg, acquire_lock=acquire_lock):
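                              # A package in the same slot (or with the identical cpv) is being
                              # replaced, not blocked, so don't treat it as a blocker.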
10315                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10316                                 continue
10317                         if new_pkg.cpv == blocking_pkg.cpv:
10318                                 continue
10319                         blocker_dblinks.append(portage.dblink(
10320                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10321                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10322                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10323
10324                 gc.collect()
10325
10326                 return blocker_dblinks
10327
10328         def _dblink_pkg(self, pkg_dblink):
10329                 cpv = pkg_dblink.mycpv
10330                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10331                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10332                 installed = type_name == "installed"
10333                 return self._pkg(cpv, type_name, root_config, installed=installed)
10334
10335         def _append_to_log_path(self, log_path, msg):
10336                 f = open(log_path, 'a')
10337                 try:
10338                         f.write(msg)
10339                 finally:
10340                         f.close()
10341
10342         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10343
10344                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10345                 log_file = None
10346                 out = sys.stdout
10347                 background = self._background
10348
10349                 if background and log_path is not None:
10350                         log_file = open(log_path, 'a')
10351                         out = log_file
10352
10353                 try:
10354                         for msg in msgs:
10355                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10356                 finally:
10357                         if log_file is not None:
10358                                 log_file.close()
10359
10360         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10361                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10362                 background = self._background
10363
10364                 if log_path is None:
10365                         if not (background and level < logging.WARN):
10366                                 portage.util.writemsg_level(msg,
10367                                         level=level, noiselevel=noiselevel)
10368                 else:
10369                         if not background:
10370                                 portage.util.writemsg_level(msg,
10371                                         level=level, noiselevel=noiselevel)
10372                         self._append_to_log_path(log_path, msg)
10373
10374         def _dblink_ebuild_phase(self,
10375                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10376                 """
10377                 Using this callback for merge phases allows the scheduler
10378                 to run while these phases execute asynchronously, and allows
10379                 the scheduler to control output handling.
10380                 """
10381
10382                 scheduler = self._sched_iface
10383                 settings = pkg_dblink.settings
10384                 pkg = self._dblink_pkg(pkg_dblink)
10385                 background = self._background
10386                 log_path = settings.get("PORTAGE_LOG_FILE")
10387
10388                 ebuild_phase = EbuildPhase(background=background,
10389                         pkg=pkg, phase=phase, scheduler=scheduler,
10390                         settings=settings, tree=pkg_dblink.treetype)
10391                 ebuild_phase.start()
10392                 ebuild_phase.wait()
10393
10394                 return ebuild_phase.returncode
10395
10396         def _check_manifests(self):
10397                 # Verify all the manifests now so that the user is notified of failure
10398                 # as soon as possible.
10399                 if "strict" not in self.settings.features or \
10400                         "--fetchonly" in self.myopts or \
10401                         "--fetch-all-uri" in self.myopts:
10402                         return os.EX_OK
10403
10404                 shown_verifying_msg = False
10405                 quiet_settings = {}
10406                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10407                         quiet_config = portage.config(clone=pkgsettings)
10408                         quiet_config["PORTAGE_QUIET"] = "1"
10409                         quiet_config.backup_changes("PORTAGE_QUIET")
10410                         quiet_settings[myroot] = quiet_config
10411                         del quiet_config
10412
10413                 for x in self._mergelist:
10414                         if not isinstance(x, Package) or \
10415                                 x.type_name != "ebuild":
10416                                 continue
10417
10418                         if not shown_verifying_msg:
10419                                 shown_verifying_msg = True
10420                                 self._status_msg("Verifying ebuild manifests")
10421
10422                         root_config = x.root_config
10423                         portdb = root_config.trees["porttree"].dbapi
10424                         quiet_config = quiet_settings[root_config.root]
10425                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10426                         if not portage.digestcheck([], quiet_config, strict=True):
10427                                 return 1
10428
10429                 return os.EX_OK
10430
10431         def _add_prefetchers(self):
10432
10433                 if not self._parallel_fetch:
10434                         return
10435
10436                 if self._parallel_fetch:
10437                         self._status_msg("Starting parallel fetch")
10438
10439                         prefetchers = self._prefetchers
10440                         getbinpkg = "--getbinpkg" in self.myopts
10441
10442                         # In order to avoid "waiting for lock" messages
10443                         # at the beginning, which annoy users, never
10444                         # spawn a prefetcher for the first package.
10445                         for pkg in self._mergelist[1:]:
10446                                 prefetcher = self._create_prefetcher(pkg)
10447                                 if prefetcher is not None:
10448                                         self._task_queues.fetch.add(prefetcher)
10449                                         prefetchers[pkg] = prefetcher
10450
10451         def _create_prefetcher(self, pkg):
10452                 """
10453                 @return: a prefetcher, or None if not applicable
10454                 """
10455                 prefetcher = None
10456
10457                 if not isinstance(pkg, Package):
10458                         pass
10459
10460                 elif pkg.type_name == "ebuild":
10461
10462                         prefetcher = EbuildFetcher(background=True,
10463                                 config_pool=self._ConfigPool(pkg.root,
10464                                 self._allocate_config, self._deallocate_config),
10465                                 fetchonly=1, logfile=self._fetch_log,
10466                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10467
10468                 elif pkg.type_name == "binary" and \
10469                         "--getbinpkg" in self.myopts and \
10470                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10471
10472                         prefetcher = BinpkgPrefetcher(background=True,
10473                                 pkg=pkg, scheduler=self._sched_iface)
10474
10475                 return prefetcher
10476
10477         def _is_restart_scheduled(self):
10478                 """
10479                 Check if the merge list contains a replacement
10480                 for the currently running instance, which will result
10481                 in a restart after the merge.
10482                 @rtype: bool
10483                 @returns: True if a restart is scheduled, False otherwise.
10484                 """
10485                 if self._opts_no_restart.intersection(self.myopts):
10486                         return False
10487
10488                 mergelist = self._mergelist
10489
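                      # The last package in the list never forces a restart, since nothing would remain to merge afterwards.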
10490                 for i, pkg in enumerate(mergelist):
10491                         if self._is_restart_necessary(pkg) and \
10492                                 i != len(mergelist) - 1:
10493                                 return True
10494
10495                 return False
10496
10497         def _is_restart_necessary(self, pkg):
10498                 """
10499                 @return: True if merging the given package
10500                         requires a restart, False otherwise.
10501                 """
10502
10503                 # Figure out if we need a restart.
10504                 if pkg.root == self._running_root.root and \
10505                         portage.match_from_list(
10506                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10507                         if self._running_portage:
10508                                 return pkg.cpv != self._running_portage.cpv
10509                         return True
10510                 return False
10511
10512         def _restart_if_necessary(self, pkg):
10513                 """
10514                 Use execv() to restart emerge. This happens
10515                 if portage upgrades itself and there are
10516                 remaining packages in the list.
10517                 """
10518
10519                 if self._opts_no_restart.intersection(self.myopts):
10520                         return
10521
10522                 if not self._is_restart_necessary(pkg):
10523                         return
10524
10525                 if pkg == self._mergelist[-1]:
10526                         return
10527
10528                 self._main_loop_cleanup()
10529
10530                 logger = self._logger
10531                 pkg_count = self._pkg_count
10532                 mtimedb = self._mtimedb
10533                 bad_resume_opts = self._bad_resume_opts
10534
10535                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10536                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10537
10538                 logger.log(" *** RESTARTING " + \
10539                         "emerge via exec() after change of " + \
10540                         "portage version.")
10541
10542                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10543                 mtimedb.commit()
10544                 portage.run_exitfuncs()
10545                 mynewargv = [sys.argv[0], "--resume"]
10546                 resume_opts = self.myopts.copy()
10547                 # For automatic resume, we need to prevent
10548                 # any of bad_resume_opts from leaking in
10549                 # via EMERGE_DEFAULT_OPTS.
10550                 resume_opts["--ignore-default-opts"] = True
10551                 for myopt, myarg in resume_opts.iteritems():
10552                         if myopt not in bad_resume_opts:
10553                                 if myarg is True:
10554                                         mynewargv.append(myopt)
10555                                 else:
10556                                         mynewargv.append(myopt +"="+ str(myarg))
10557                 # priority only needs to be adjusted on the first run
10558                 os.environ["PORTAGE_NICENESS"] = "0"
10559                 os.execv(mynewargv[0], mynewargv)
10560
10561         def merge(self):
10562
10563                 if "--resume" in self.myopts:
10564                         # We're resuming.
10565                         portage.writemsg_stdout(
10566                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10567                         self._logger.log(" *** Resuming merge...")
10568
10569                 self._save_resume_list()
10570
10571                 try:
10572                         self._background = self._background_mode()
10573                 except self._unknown_internal_error:
10574                         return 1
10575
10576                 for root in self.trees:
10577                         root_config = self.trees[root]["root_config"]
10578
10579                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10580                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10581                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10582                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10583                         if not tmpdir or not os.path.isdir(tmpdir):
10584                                 msg = "The directory specified in your " + \
10585                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10586                                         "does not exist. Please create this " + \
10587                                         "directory or correct your PORTAGE_TMPDIR setting."
10588                                 msg = textwrap.wrap(msg, 70)
10589                                 out = portage.output.EOutput()
10590                                 for l in msg:
10591                                         out.eerror(l)
10592                                 return 1
10593
10594                         if self._background:
10595                                 root_config.settings.unlock()
10596                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10597                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10598                                 root_config.settings.lock()
10599
10600                         self.pkgsettings[root] = portage.config(
10601                                 clone=root_config.settings)
10602
10603                 rval = self._check_manifests()
10604                 if rval != os.EX_OK:
10605                         return rval
10606
10607                 keep_going = "--keep-going" in self.myopts
10608                 fetchonly = self._build_opts.fetchonly
10609                 mtimedb = self._mtimedb
10610                 failed_pkgs = self._failed_pkgs
10611
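                      # With --keep-going, prune failed packages from the resume list, recalculate it, and try again.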
10612                 while True:
10613                         rval = self._merge()
10614                         if rval == os.EX_OK or fetchonly or not keep_going:
10615                                 break
10616                         if "resume" not in mtimedb:
10617                                 break
10618                         mergelist = self._mtimedb["resume"].get("mergelist")
10619                         if not mergelist:
10620                                 break
10621
10622                         if not failed_pkgs:
10623                                 break
10624
10625                         for failed_pkg in failed_pkgs:
10626                                 mergelist.remove(list(failed_pkg.pkg))
10627
10628                         self._failed_pkgs_all.extend(failed_pkgs)
10629                         del failed_pkgs[:]
10630
10631                         if not mergelist:
10632                                 break
10633
10634                         if not self._calc_resume_list():
10635                                 break
10636
10637                         clear_caches(self.trees)
10638                         if not self._mergelist:
10639                                 break
10640
10641                         self._save_resume_list()
10642                         self._pkg_count.curval = 0
10643                         self._pkg_count.maxval = len([x for x in self._mergelist \
10644                                 if isinstance(x, Package) and x.operation == "merge"])
10645                         self._status_display.maxval = self._pkg_count.maxval
10646
10647                 self._logger.log(" *** Finished. Cleaning up...")
10648
10649                 if failed_pkgs:
10650                         self._failed_pkgs_all.extend(failed_pkgs)
10651                         del failed_pkgs[:]
10652
10653                 background = self._background
10654                 failure_log_shown = False
10655                 if background and len(self._failed_pkgs_all) == 1:
10656                         # If only one package failed then just show its
10657                         # whole log for easy viewing.
10658                         failed_pkg = self._failed_pkgs_all[-1]
10659                         build_dir = failed_pkg.build_dir
10660                         log_file = None
10661
10662                         log_paths = [failed_pkg.build_log]
10663
10664                         log_path = self._locate_failure_log(failed_pkg)
10665                         if log_path is not None:
10666                                 try:
10667                                         log_file = open(log_path)
10668                                 except IOError:
10669                                         pass
10670
10671                         if log_file is not None:
10672                                 try:
10673                                         for line in log_file:
10674                                                 writemsg_level(line, noiselevel=-1)
10675                                 finally:
10676                                         log_file.close()
10677                                 failure_log_shown = True
10678
10679                 # Dump mod_echo output now since it tends to flood the terminal.
10680                 # This prevents more important output, generated later, from being
10681                 # swept away by the mod_echo output.
10682                 mod_echo_output =  _flush_elog_mod_echo()
10683
10684                 if background and not failure_log_shown and \
10685                         self._failed_pkgs_all and \
10686                         self._failed_pkgs_die_msgs and \
10687                         not mod_echo_output:
10688
10689                         printer = portage.output.EOutput()
10690                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10691                                 root_msg = ""
10692                                 if mysettings["ROOT"] != "/":
10693                                         root_msg = " merged to %s" % mysettings["ROOT"]
10694                                 print
10695                                 printer.einfo("Error messages for package %s%s:" % \
10696                                         (colorize("INFORM", key), root_msg))
10697                                 print
10698                                 for phase in portage.const.EBUILD_PHASES:
10699                                         if phase not in logentries:
10700                                                 continue
10701                                         for msgtype, msgcontent in logentries[phase]:
10702                                                 if isinstance(msgcontent, basestring):
10703                                                         msgcontent = [msgcontent]
10704                                                 for line in msgcontent:
10705                                                         printer.eerror(line.strip("\n"))
10706
10707                 if self._post_mod_echo_msgs:
10708                         for msg in self._post_mod_echo_msgs:
10709                                 msg()
10710
10711                 if len(self._failed_pkgs_all) > 1 or \
10712                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10713                         if len(self._failed_pkgs_all) > 1:
10714                                 msg = "The following %d packages have " % \
10715                                         len(self._failed_pkgs_all) + \
10716                                         "failed to build or install:"
10717                         else:
10718                                 msg = "The following package has " + \
10719                                         "failed to build or install:"
10720                         prefix = bad(" * ")
10721                         writemsg(prefix + "\n", noiselevel=-1)
10722                         from textwrap import wrap
10723                         for line in wrap(msg, 72):
10724                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10725                         writemsg(prefix + "\n", noiselevel=-1)
10726                         for failed_pkg in self._failed_pkgs_all:
10727                                 writemsg("%s\t%s\n" % (prefix,
10728                                         colorize("INFORM", str(failed_pkg.pkg))),
10729                                         noiselevel=-1)
10730                         writemsg(prefix + "\n", noiselevel=-1)
10731
10732                 return rval
10733
10734         def _elog_listener(self, mysettings, key, logentries, fulltext):
10735                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10736                 if errors:
10737                         self._failed_pkgs_die_msgs.append(
10738                                 (mysettings, key, errors))
10739
10740         def _locate_failure_log(self, failed_pkg):
10741
10742                 build_dir = failed_pkg.build_dir
10743                 log_file = None
10744
10745                 log_paths = [failed_pkg.build_log]
10746
10747                 for log_path in log_paths:
10748                         if not log_path:
10749                                 continue
10750
10751                         try:
10752                                 log_size = os.stat(log_path).st_size
10753                         except OSError:
10754                                 continue
10755
10756                         if log_size == 0:
10757                                 continue
10758
10759                         return log_path
10760
10761                 return None
10762
10763         def _add_packages(self):
10764                 pkg_queue = self._pkg_queue
10765                 for pkg in self._mergelist:
10766                         if isinstance(pkg, Package):
10767                                 pkg_queue.append(pkg)
10768                         elif isinstance(pkg, Blocker):
10769                                 pass
10770
10771         def _merge_wait_exit_handler(self, task):
10772                 self._merge_wait_scheduled.remove(task)
10773                 self._merge_exit(task)
10774
10775         def _merge_exit(self, merge):
10776                 self._do_merge_exit(merge)
10777                 self._deallocate_config(merge.merge.settings)
10778                 if merge.returncode == os.EX_OK and \
10779                         not merge.merge.pkg.installed:
10780                         self._status_display.curval += 1
10781                 self._status_display.merges = len(self._task_queues.merge)
10782                 self._schedule()
10783
10784         def _do_merge_exit(self, merge):
10785                 pkg = merge.merge.pkg
10786                 if merge.returncode != os.EX_OK:
10787                         settings = merge.merge.settings
10788                         build_dir = settings.get("PORTAGE_BUILDDIR")
10789                         build_log = settings.get("PORTAGE_LOG_FILE")
10790
10791                         self._failed_pkgs.append(self._failed_pkg(
10792                                 build_dir=build_dir, build_log=build_log,
10793                                 pkg=pkg,
10794                                 returncode=merge.returncode))
10795                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10796
10797                         self._status_display.failed = len(self._failed_pkgs)
10798                         return
10799
10800                 self._task_complete(pkg)
10801                 pkg_to_replace = merge.merge.pkg_to_replace
10802                 if pkg_to_replace is not None:
10803                         # When a package is replaced, mark its uninstall
10804                         # task complete (if any).
10805                         uninst_hash_key = \
10806                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10807                         self._task_complete(uninst_hash_key)
10808
10809                 if pkg.installed:
10810                         return
10811
10812                 self._restart_if_necessary(pkg)
10813
10814                 # Call mtimedb.commit() after each merge so that
10815                 # --resume still works after being interrupted
10816                 # by reboot, sigkill or similar.
10817                 mtimedb = self._mtimedb
10818                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10819                 if not mtimedb["resume"]["mergelist"]:
10820                         del mtimedb["resume"]
10821                 mtimedb.commit()
10822
10823         def _build_exit(self, build):
10824                 if build.returncode == os.EX_OK:
10825                         self.curval += 1
10826                         merge = PackageMerge(merge=build)
10827                         if not build.build_opts.buildpkgonly and \
10828                                 build.pkg in self._deep_system_deps:
10829                                 # Since dependencies on system packages are frequently
10830                                 # unspecified, merge them only when no builds are executing.
10831                                 self._merge_wait_queue.append(merge)
10832                         else:
10833                                 merge.addExitListener(self._merge_exit)
10834                                 self._task_queues.merge.add(merge)
10835                                 self._status_display.merges = len(self._task_queues.merge)
10836                 else:
10837                         settings = build.settings
10838                         build_dir = settings.get("PORTAGE_BUILDDIR")
10839                         build_log = settings.get("PORTAGE_LOG_FILE")
10840
10841                         self._failed_pkgs.append(self._failed_pkg(
10842                                 build_dir=build_dir, build_log=build_log,
10843                                 pkg=build.pkg,
10844                                 returncode=build.returncode))
10845                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10846
10847                         self._status_display.failed = len(self._failed_pkgs)
10848                         self._deallocate_config(build.settings)
10849                 self._jobs -= 1
10850                 self._status_display.running = self._jobs
10851                 self._schedule()
10852
10853         def _extract_exit(self, build):
10854                 self._build_exit(build)
10855
10856         def _task_complete(self, pkg):
10857                 self._completed_tasks.add(pkg)
10858                 self._choose_pkg_return_early = False
10859
10860         def _merge(self):
10861
10862                 self._add_prefetchers()
10863                 self._add_packages()
10864                 pkg_queue = self._pkg_queue
10865                 failed_pkgs = self._failed_pkgs
10866                 portage.locks._quiet = self._background
10867                 portage.elog._emerge_elog_listener = self._elog_listener
10868                 rval = os.EX_OK
10869
10870                 try:
10871                         self._main_loop()
10872                 finally:
10873                         self._main_loop_cleanup()
10874                         portage.locks._quiet = False
10875                         portage.elog._emerge_elog_listener = None
10876                         if failed_pkgs:
10877                                 rval = failed_pkgs[-1].returncode
10878
10879                 return rval
10880
10881         def _main_loop_cleanup(self):
10882                 del self._pkg_queue[:]
10883                 self._completed_tasks.clear()
10884                 self._deep_system_deps.clear()
10885                 self._choose_pkg_return_early = False
10886                 self._status_display.reset()
10887                 self._digraph = None
10888                 self._task_queues.fetch.clear()
10889
10890         def _choose_pkg(self):
10891                 """
10892                 Choose a task that has all of its dependencies satisfied.
10893                 """
10894
10895                 if self._choose_pkg_return_early:
10896                         return None
10897
10898                 if self._digraph is None:
10899                         if (self._jobs or self._task_queues.merge) and \
10900                                 not ("--nodeps" in self.myopts and \
10901                                 (self._max_jobs is True or self._max_jobs > 1)):
10902                                 self._choose_pkg_return_early = True
10903                                 return None
10904                         return self._pkg_queue.pop(0)
10905
10906                 if not (self._jobs or self._task_queues.merge):
10907                         return self._pkg_queue.pop(0)
10908
10909                 self._prune_digraph()
10910
10911                 chosen_pkg = None
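                      # Packages queued after a candidate are ignored as dependencies, since
                      # delaying the candidate for them would not improve merge order.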
10912                 later = set(self._pkg_queue)
10913                 for pkg in self._pkg_queue:
10914                         later.remove(pkg)
10915                         if not self._dependent_on_scheduled_merges(pkg, later):
10916                                 chosen_pkg = pkg
10917                                 break
10918
10919                 if chosen_pkg is not None:
10920                         self._pkg_queue.remove(chosen_pkg)
10921
10922                 if chosen_pkg is None:
10923                         # There's no point in searching for a package to
10924                         # choose until at least one of the existing jobs
10925                         # completes.
10926                         self._choose_pkg_return_early = True
10927
10928                 return chosen_pkg
10929
10930         def _dependent_on_scheduled_merges(self, pkg, later):
10931                 """
10932                 Traverse the subgraph of the given package's deep dependencies
10933                 to see if it contains any scheduled merges.
10934                 @param pkg: a package to check dependencies for
10935                 @type pkg: Package
10936                 @param later: packages for which dependence should be ignored
10937                         since they will be merged later than pkg anyway and therefore
10938                         delaying the merge of pkg will not result in a more optimal
10939                         merge order
10940                 @type later: set
10941                 @rtype: bool
10942                 @returns: True if the package is dependent, False otherwise.
10943                 """
10944
10945                 graph = self._digraph
10946                 completed_tasks = self._completed_tasks
10947
10948                 dependent = False
10949                 traversed_nodes = set([pkg])
10950                 direct_deps = graph.child_nodes(pkg)
10951                 node_stack = direct_deps
10952                 direct_deps = frozenset(direct_deps)
10953                 while node_stack:
10954                         node = node_stack.pop()
10955                         if node in traversed_nodes:
10956                                 continue
10957                         traversed_nodes.add(node)
10958                         if not ((node.installed and node.operation == "nomerge") or \
10959                                 (node.operation == "uninstall" and \
10960                                 node not in direct_deps) or \
10961                                 node in completed_tasks or \
10962                                 node in later):
10963                                 dependent = True
10964                                 break
10965                         node_stack.extend(graph.child_nodes(node))
10966
10967                 return dependent
10968
10969         def _allocate_config(self, root):
10970                 """
10971                 Allocate a unique config instance for a task in order
10972                 to prevent interference between parallel tasks.
10973                 """
10974                 if self._config_pool[root]:
10975                         temp_settings = self._config_pool[root].pop()
10976                 else:
10977                         temp_settings = portage.config(clone=self.pkgsettings[root])
10978                 # Since config.setcpv() isn't guaranteed to call config.reset() for
10979                 # performance reasons, call it here to make sure all settings from the
10980                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10981                 temp_settings.reload()
10982                 temp_settings.reset()
10983                 return temp_settings
10984
10985         def _deallocate_config(self, settings):
10986                 self._config_pool[settings["ROOT"]].append(settings)
10987
10988         def _main_loop(self):
10989
10990                 # Only allow 1 job max if a restart is scheduled
10991                 # due to portage update.
10992                 if self._is_restart_scheduled() or \
10993                         self._opts_no_background.intersection(self.myopts):
10994                         self._set_max_jobs(1)
10995
10996                 merge_queue = self._task_queues.merge
10997
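                      # Keep scheduling new jobs for as long as the package queue allows it.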
10998                 while self._schedule():
10999                         if self._poll_event_handlers:
11000                                 self._poll_loop()
11001
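                      # Once nothing new can be scheduled, wait for the remaining jobs and merges to finish.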
11002                 while True:
11003                         self._schedule()
11004                         if not (self._jobs or merge_queue):
11005                                 break
11006                         if self._poll_event_handlers:
11007                                 self._poll_loop()
11008
11009         def _keep_scheduling(self):
11010                 return bool(self._pkg_queue and \
11011                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11012
11013         def _schedule_tasks(self):
11014
11015                 # When the number of jobs drops to zero, process all waiting merges.
11016                 if not self._jobs and self._merge_wait_queue:
11017                         for task in self._merge_wait_queue:
11018                                 task.addExitListener(self._merge_wait_exit_handler)
11019                                 self._task_queues.merge.add(task)
11020                         self._status_display.merges = len(self._task_queues.merge)
11021                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11022                         del self._merge_wait_queue[:]
11023
11024                 self._schedule_tasks_imp()
11025                 self._status_display.display()
11026
11027                 state_change = 0
11028                 for q in self._task_queues.values():
11029                         if q.schedule():
11030                                 state_change += 1
11031
11032                 # Cancel prefetchers if they're the only reason
11033                 # the main poll loop is still running.
11034                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11035                         not (self._jobs or self._task_queues.merge) and \
11036                         self._task_queues.fetch:
11037                         self._task_queues.fetch.clear()
11038                         state_change += 1
11039
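                      # If any queue made progress, capacity may have been freed, so try scheduling again.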
11040                 if state_change:
11041                         self._schedule_tasks_imp()
11042                         self._status_display.display()
11043
11044                 return self._keep_scheduling()
11045
11046         def _job_delay(self):
11047                 """
11048                 @rtype: bool
11049                 @returns: True if job scheduling should be delayed, False otherwise.
11050                 """
11051
11052                 if self._jobs and self._max_load is not None:
11053
11054                         current_time = time.time()
11055
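                              # The delay between job starts grows with the number of running jobs, capped at _job_delay_max.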
11056                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11057                         if delay > self._job_delay_max:
11058                                 delay = self._job_delay_max
11059                         if (current_time - self._previous_job_start_time) < delay:
11060                                 return True
11061
11062                 return False
11063
11064         def _schedule_tasks_imp(self):
11065                 """
11066                 @rtype: bool
11067                 @returns: True if state changed, False otherwise.
11068                 """
11069
11070                 state_change = 0
11071
11072                 while True:
11073
11074                         if not self._keep_scheduling():
11075                                 return bool(state_change)
11076
11077                         if self._choose_pkg_return_early or \
11078                                 self._merge_wait_scheduled or \
11079                                 not self._can_add_job() or \
11080                                 self._job_delay():
11081                                 return bool(state_change)
11082
11083                         pkg = self._choose_pkg()
11084                         if pkg is None:
11085                                 return bool(state_change)
11086
11087                         state_change += 1
11088
11089                         if not pkg.installed:
11090                                 self._pkg_count.curval += 1
11091
11092                         task = self._task(pkg)
11093
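                              # Installed packages go straight to the merge queue; built packages
                              # and source builds are counted as parallel jobs.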
11094                         if pkg.installed:
11095                                 merge = PackageMerge(merge=task)
11096                                 merge.addExitListener(self._merge_exit)
11097                                 self._task_queues.merge.add(merge)
11098
11099                         elif pkg.built:
11100                                 self._jobs += 1
11101                                 self._previous_job_start_time = time.time()
11102                                 self._status_display.running = self._jobs
11103                                 task.addExitListener(self._extract_exit)
11104                                 self._task_queues.jobs.add(task)
11105
11106                         else:
11107                                 self._jobs += 1
11108                                 self._previous_job_start_time = time.time()
11109                                 self._status_display.running = self._jobs
11110                                 task.addExitListener(self._build_exit)
11111                                 self._task_queues.jobs.add(task)
11112
11113                 return bool(state_change)
11114
11115         def _task(self, pkg):
11116
11117                 pkg_to_replace = None
11118                 if pkg.operation != "uninstall":
11119                         vardb = pkg.root_config.trees["vartree"].dbapi
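                              # Any installed package in the same slot will be replaced by this merge.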
11120                         previous_cpv = vardb.match(pkg.slot_atom)
11121                         if previous_cpv:
11122                                 previous_cpv = previous_cpv.pop()
11123                                 pkg_to_replace = self._pkg(previous_cpv,
11124                                         "installed", pkg.root_config, installed=True)
11125
11126                 task = MergeListItem(args_set=self._args_set,
11127                         background=self._background, binpkg_opts=self._binpkg_opts,
11128                         build_opts=self._build_opts,
11129                         config_pool=self._ConfigPool(pkg.root,
11130                         self._allocate_config, self._deallocate_config),
11131                         emerge_opts=self.myopts,
11132                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11133                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11134                         pkg_to_replace=pkg_to_replace,
11135                         prefetcher=self._prefetchers.get(pkg),
11136                         scheduler=self._sched_iface,
11137                         settings=self._allocate_config(pkg.root),
11138                         statusMessage=self._status_msg,
11139                         world_atom=self._world_atom)
11140
11141                 return task
11142
11143         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11144                 pkg = failed_pkg.pkg
11145                 msg = "%s to %s %s" % \
11146                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11147                 if pkg.root != "/":
11148                         msg += " %s %s" % (preposition, pkg.root)
11149
11150                 log_path = self._locate_failure_log(failed_pkg)
11151                 if log_path is not None:
11152                         msg += ", Log file:"
11153                 self._status_msg(msg)
11154
11155                 if log_path is not None:
11156                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11157
11158         def _status_msg(self, msg):
11159                 """
11160                 Display a brief status message (no newlines) in the status display.
11161                 This is called by tasks to provide feedback to the user. This
11162                 delegates responsibility for generating \r and \n control characters
11163                 to the status display, which guarantees that lines are created or erased
11164                 when necessary and appropriate.
11165
11166                 @type msg: str
11167                 @param msg: a brief status message (no newlines allowed)
11168                 """
11169                 if not self._background:
11170                         writemsg_level("\n")
11171                 self._status_display.displayMessage(msg)
11172
11173         def _save_resume_list(self):
11174                 """
11175                 Do this before verifying the ebuild Manifests since it might
11176                 be possible for the user to use --resume --skipfirst to get past
11177                 a non-essential package with a broken digest.
11178                 """
11179                 mtimedb = self._mtimedb
11180                 mtimedb["resume"]["mergelist"] = [list(x) \
11181                         for x in self._mergelist \
11182                         if isinstance(x, Package) and x.operation == "merge"]
11183
11184                 mtimedb.commit()
11185
11186         def _calc_resume_list(self):
11187                 """
11188                 Use the current resume list to calculate a new one,
11189                 dropping any packages with unsatisfied deps.
11190                 @rtype: bool
11191                 @returns: True if successful, False otherwise.
11192                 """
11193                 print colorize("GOOD", "*** Resuming merge...")
11194
11195                 if self._show_list():
11196                         if "--tree" in self.myopts:
11197                                 portage.writemsg_stdout("\n" + \
11198                                         darkgreen("These are the packages that " + \
11199                                         "would be merged, in reverse order:\n\n"))
11200
11201                         else:
11202                                 portage.writemsg_stdout("\n" + \
11203                                         darkgreen("These are the packages that " + \
11204                                         "would be merged, in order:\n\n"))
11205
11206                 show_spinner = "--quiet" not in self.myopts and \
11207                         "--nodeps" not in self.myopts
11208
11209                 if show_spinner:
11210                         print "Calculating dependencies  ",
11211
11212                 myparams = create_depgraph_params(self.myopts, None)
11213                 success = False
11214                 e = None
11215                 try:
11216                         success, mydepgraph, dropped_tasks = resume_depgraph(
11217                                 self.settings, self.trees, self._mtimedb, self.myopts,
11218                                 myparams, self._spinner)
11219                 except depgraph.UnsatisfiedResumeDep, exc:
11220                         # rename variable to avoid python-3.0 error:
11221                         # SyntaxError: can not delete variable 'e' referenced in nested
11222                         #              scope
11223                         e = exc
11224                         mydepgraph = e.depgraph
11225                         dropped_tasks = set()
11226
11227                 if show_spinner:
11228                         print "\b\b... done!"
11229
11230                 if e is not None:
11231                         def unsatisfied_resume_dep_msg():
11232                                 mydepgraph.display_problems()
11233                                 out = portage.output.EOutput()
11234                                 out.eerror("One or more packages are either masked or " + \
11235                                         "have missing dependencies:")
11236                                 out.eerror("")
11237                                 indent = "  "
11238                                 show_parents = set()
11239                                 for dep in e.value:
11240                                         if dep.parent in show_parents:
11241                                                 continue
11242                                         show_parents.add(dep.parent)
11243                                         if dep.atom is None:
11244                                                 out.eerror(indent + "Masked package:")
11245                                                 out.eerror(2 * indent + str(dep.parent))
11246                                                 out.eerror("")
11247                                         else:
11248                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11249                                                 out.eerror(2 * indent + str(dep.parent))
11250                                                 out.eerror("")
11251                                 msg = "The resume list contains packages " + \
11252                                         "that are either masked or have " + \
11253                                         "unsatisfied dependencies. " + \
11254                                         "Please restart/continue " + \
11255                                         "the operation manually, or use --skipfirst " + \
11256                                         "to skip the first package in the list and " + \
11257                                         "any other packages that may be " + \
11258                                         "masked or have missing dependencies."
11259                                 for line in textwrap.wrap(msg, 72):
11260                                         out.eerror(line)
11261                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11262                         return False
11263
11264                 if success and self._show_list():
11265                         mylist = mydepgraph.altlist()
11266                         if mylist:
11267                                 if "--tree" in self.myopts:
11268                                         mylist.reverse()
11269                                 mydepgraph.display(mylist, favorites=self._favorites)
11270
11271                 if not success:
11272                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11273                         return False
11274                 mydepgraph.display_problems()
11275
11276                 mylist = mydepgraph.altlist()
11277                 mydepgraph.break_refs(mylist)
11278                 mydepgraph.break_refs(dropped_tasks)
11279                 self._mergelist = mylist
11280                 self._set_digraph(mydepgraph.schedulerGraph())
11281
11282                 msg_width = 75
11283                 for task in dropped_tasks:
11284                         if not (isinstance(task, Package) and task.operation == "merge"):
11285                                 continue
11286                         pkg = task
11287                         msg = "emerge --keep-going:" + \
11288                                 " %s" % (pkg.cpv,)
11289                         if pkg.root != "/":
11290                                 msg += " for %s" % (pkg.root,)
11291                         msg += " dropped due to unsatisfied dependency."
11292                         for line in textwrap.wrap(msg, msg_width):
11293                                 eerror(line, phase="other", key=pkg.cpv)
11294                         settings = self.pkgsettings[pkg.root]
11295                         # Ensure that log collection from $T is disabled inside
11296                         # elog_process(), since any logs that might exist are
11297                         # not valid here.
11298                         settings.pop("T", None)
11299                         portage.elog.elog_process(pkg.cpv, settings)
11300                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11301
11302                 return True
11303
11304         def _show_list(self):
11305                 myopts = self.myopts
11306                 if "--quiet" not in myopts and \
11307                         ("--ask" in myopts or "--tree" in myopts or \
11308                         "--verbose" in myopts):
11309                         return True
11310                 return False
11311
11312         def _world_atom(self, pkg):
11313                 """
11314                 Add the package to the world file, but only if
11315                 it's supposed to be added. Otherwise, do nothing.
11316                 """
11317
11318                 if set(("--buildpkgonly", "--fetchonly",
11319                         "--fetch-all-uri",
11320                         "--oneshot", "--onlydeps",
11321                         "--pretend")).intersection(self.myopts):
11322                         return
11323
11324                 if pkg.root != self.target_root:
11325                         return
11326
11327                 args_set = self._args_set
11328                 if not args_set.findAtomForPackage(pkg):
11329                         return
11330
11331                 logger = self._logger
11332                 pkg_count = self._pkg_count
11333                 root_config = pkg.root_config
11334                 world_set = root_config.sets["world"]
11335                 world_locked = False
11336                 if hasattr(world_set, "lock"):
11337                         world_set.lock()
11338                         world_locked = True
11339
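                      # Hold the world file lock (when the set supports locking) while it
                      # is reloaded and updated below, so that a concurrent process cannot
                      # clobber the entry being recorded.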
11340                 try:
11341                         if hasattr(world_set, "load"):
11342                                 world_set.load() # maybe it's changed on disk
11343
11344                         atom = create_world_atom(pkg, args_set, root_config)
11345                         if atom:
11346                                 if hasattr(world_set, "add"):
11347                                         self._status_msg(('Recording %s in "world" ' + \
11348                                                 'favorites file...') % atom)
11349                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11350                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11351                                         world_set.add(atom)
11352                                 else:
11353                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11354                                                 (atom,), level=logging.WARN, noiselevel=-1)
11355                 finally:
11356                         if world_locked:
11357                                 world_set.unlock()
11358
11359         def _pkg(self, cpv, type_name, root_config, installed=False):
11360                 """
11361                 Get a package instance from the cache, or create a new
11362                 one if necessary. Raises KeyError from aux_get if it
11363                 fails for some reason (package does not exist or is
11364                 corrupt).
11365                 """
11366                 operation = "merge"
11367                 if installed:
11368                         operation = "nomerge"
11369
11370                 if self._digraph is not None:
11371                         # Reuse existing instance when available.
11372                         pkg = self._digraph.get(
11373                                 (type_name, root_config.root, cpv, operation))
11374                         if pkg is not None:
11375                                 return pkg
11376
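                      # No cached instance is available, so build a fresh Package from the
                      # matching tree's aux-cache metadata; for ebuilds, USE is recalculated
                      # against the current configuration via setcpv().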
11377                 tree_type = depgraph.pkg_tree_map[type_name]
11378                 db = root_config.trees[tree_type].dbapi
11379                 db_keys = list(self.trees[root_config.root][
11380                         tree_type].dbapi._aux_cache_keys)
11381                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11382                 pkg = Package(cpv=cpv, metadata=metadata,
11383                         root_config=root_config, installed=installed)
11384                 if type_name == "ebuild":
11385                         settings = self.pkgsettings[root_config.root]
11386                         settings.setcpv(pkg)
11387                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11388
11389                 return pkg
11390
11391 class MetadataRegen(PollScheduler):
11392
11393         def __init__(self, portdb, max_jobs=None, max_load=None):
11394                 PollScheduler.__init__(self)
11395                 self._portdb = portdb
11396
11397                 if max_jobs is None:
11398                         max_jobs = 1
11399
11400                 self._max_jobs = max_jobs
11401                 self._max_load = max_load
11402                 self._sched_iface = self._sched_iface_class(
11403                         register=self._register,
11404                         schedule=self._schedule_wait,
11405                         unregister=self._unregister)
11406
11407                 self._valid_pkgs = set()
11408                 self._process_iter = self._iter_metadata_processes()
11409                 self.returncode = os.EX_OK
11410                 self._error_count = 0
11411
11412         def _iter_metadata_processes(self):
11413                 portdb = self._portdb
11414                 valid_pkgs = self._valid_pkgs
11415                 every_cp = portdb.cp_all()
11416                 every_cp.sort(reverse=True)
11417
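                      # Pop categories in sorted order and yield one metadata regeneration
                      # process per ebuild; entries for which _metadata_process() returns
                      # None are skipped, since there is nothing to regenerate for them.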
11418                 while every_cp:
11419                         cp = every_cp.pop()
11420                         portage.writemsg_stdout("Processing %s\n" % cp)
11421                         cpv_list = portdb.cp_list(cp)
11422                         for cpv in cpv_list:
11423                                 valid_pkgs.add(cpv)
11424                                 ebuild_path, repo_path = portdb.findname2(cpv)
11425                                 metadata_process = portdb._metadata_process(
11426                                         cpv, ebuild_path, repo_path)
11427                                 if metadata_process is None:
11428                                         continue
11429                                 yield metadata_process
11430
11431         def run(self):
11432
11433                 portdb = self._portdb
11434                 from portage.cache.cache_errors import CacheError
11435                 dead_nodes = {}
11436
11437                 for mytree in portdb.porttrees:
11438                         try:
11439                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11440                         except CacheError, e:
11441                                 portage.writemsg("Error listing cache entries for " + \
11442                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11443                                 del e
11444                                 dead_nodes = None
11445                                 break
11446
11447                 while self._schedule():
11448                         self._poll_loop()
11449
11450                 while self._jobs:
11451                         self._poll_loop()
11452
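                      # Entries that still have an ebuild in a given tree are dropped from
                      # that tree's dead_nodes set; anything left over is a stale cache
                      # entry and is removed from the corresponding auxdb below.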
11453                 if dead_nodes:
11454                         for y in self._valid_pkgs:
11455                                 for mytree in portdb.porttrees:
11456                                         if portdb.findname2(y, mytree=mytree)[0]:
11457                                                 dead_nodes[mytree].discard(y)
11458
11459                         for mytree, nodes in dead_nodes.iteritems():
11460                                 auxdb = portdb.auxdb[mytree]
11461                                 for y in nodes:
11462                                         try:
11463                                                 del auxdb[y]
11464                                         except (KeyError, CacheError):
11465                                                 pass
11466
11467         def _schedule_tasks(self):
11468                 """
11469                 @rtype: bool
11470                 @returns: True if there may be remaining tasks to schedule,
11471                         False otherwise.
11472                 """
11473                 while self._can_add_job():
11474                         try:
11475                                 metadata_process = self._process_iter.next()
11476                         except StopIteration:
11477                                 return False
11478
11479                         self._jobs += 1
11480                         metadata_process.scheduler = self._sched_iface
11481                         metadata_process.addExitListener(self._metadata_exit)
11482                         metadata_process.start()
11483                 return True
11484
11485         def _metadata_exit(self, metadata_process):
11486                 self._jobs -= 1
11487                 if metadata_process.returncode != os.EX_OK:
11488                         self.returncode = 1
11489                         self._error_count += 1
11490                         self._valid_pkgs.discard(metadata_process.cpv)
11491                         portage.writemsg("Error processing %s, continuing...\n" % \
11492                                 (metadata_process.cpv,))
11493                 self._schedule()
11494
11495 class UninstallFailure(portage.exception.PortageException):
11496         """
11497         An instance of this class is raised by unmerge() when
11498         an uninstallation fails.
11499         """
11500         status = 1
11501         def __init__(self, *pargs):
11502                 portage.exception.PortageException.__init__(self, pargs)
11503                 if pargs:
11504                         self.status = pargs[0]
11505
11506 def unmerge(root_config, myopts, unmerge_action,
11507         unmerge_files, ldpath_mtimes, autoclean=0,
11508         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11509         scheduler=None, writemsg_level=portage.util.writemsg_level):
11510
11511         quiet = "--quiet" in myopts
11512         settings = root_config.settings
11513         sets = root_config.sets
11514         vartree = root_config.trees["vartree"]
11515         candidate_catpkgs=[]
11516         global_unmerge=0
11517         xterm_titles = "notitles" not in settings.features
11518         out = portage.output.EOutput()
11519         pkg_cache = {}
11520         db_keys = list(vartree.dbapi._aux_cache_keys)
11521
11522         def _pkg(cpv):
11523                 pkg = pkg_cache.get(cpv)
11524                 if pkg is None:
11525                         pkg = Package(cpv=cpv, installed=True,
11526                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11527                                 root_config=root_config,
11528                                 type_name="installed")
11529                         pkg_cache[cpv] = pkg
11530                 return pkg
11531
11532         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11533         try:
11534                 # At least the parent needs to exist for the lock file.
11535                 portage.util.ensure_dirs(vdb_path)
11536         except portage.exception.PortageException:
11537                 pass
11538         vdb_lock = None
11539         try:
11540                 if os.access(vdb_path, os.W_OK):
11541                         vdb_lock = portage.locks.lockdir(vdb_path)
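                      # Build the list of system-profile package names; a virtual is
                      # replaced by its provider when exactly one installed package
                      # provides it.  This feeds the "system profile" warning further below.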
11542                 realsyslist = sets["system"].getAtoms()
11543                 syslist = []
11544                 for x in realsyslist:
11545                         mycp = portage.dep_getkey(x)
11546                         if mycp in settings.getvirtuals():
11547                                 providers = []
11548                                 for provider in settings.getvirtuals()[mycp]:
11549                                         if vartree.dbapi.match(provider):
11550                                                 providers.append(provider)
11551                                 if len(providers) == 1:
11552                                         syslist.extend(providers)
11553                         else:
11554                                 syslist.append(mycp)
11555         
11556                 mysettings = portage.config(clone=settings)
11557         
11558                 if not unmerge_files:
11559                         if unmerge_action == "unmerge":
11560                                 print
11561                                 print bold("emerge unmerge") + " can only be used with specific package names"
11562                                 print
11563                                 return 0
11564                         else:
11565                                 global_unmerge = 1
11566         
11567                 localtree = vartree
11568                 # process all arguments and add all
11569                 # valid db entries to candidate_catpkgs
11570                 if global_unmerge:
11571                         if not unmerge_files:
11572                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11573                 else:
11574                         #we've got command-line arguments
11575                         if not unmerge_files:
11576                                 print "\nNo packages to unmerge have been provided.\n"
11577                                 return 0
11578                         for x in unmerge_files:
11579                                 arg_parts = x.split('/')
11580                                 if x[0] not in [".","/"] and \
11581                                         arg_parts[-1][-7:] != ".ebuild":
11582                                         #possible cat/pkg or dep; treat as such
11583                                         candidate_catpkgs.append(x)
11584                                 elif unmerge_action in ["prune","clean"]:
11585                                         print "\n!!! Prune and clean do not accept individual" + \
11586                                                 " ebuilds as arguments;\n    skipping.\n"
11587                                         continue
11588                                 else:
11589                                         # it appears that the user is specifying an installed
11590                                         # ebuild and we're in "unmerge" mode, so it's ok.
11591                                         if not os.path.exists(x):
11592                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11593                                                 return 0
11594         
11595                                         absx   = os.path.abspath(x)
11596                                         sp_absx = absx.split("/")
11597                                         if sp_absx[-1][-7:] == ".ebuild":
11598                                                 del sp_absx[-1]
11599                                                 absx = "/".join(sp_absx)
11600         
11601                                         sp_absx_len = len(sp_absx)
11602         
11603                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11604                                         vdb_len  = len(vdb_path)
11605         
11606                                         sp_vdb     = vdb_path.split("/")
11607                                         sp_vdb_len = len(sp_vdb)
11608         
11609                                         if not os.path.exists(absx+"/CONTENTS"):
11610                                                 print "!!! Not a valid db dir: "+str(absx)
11611                                                 return 0
11612         
11613                                         if sp_absx_len <= sp_vdb_len:
11614                                                 # The path is shorter, so it can't be inside the vdb.
11615                                                 print sp_absx
11616                                                 print absx
11617                                                 print "\n!!!",x,"cannot be inside "+ \
11618                                                         vdb_path+"; aborting.\n"
11619                                                 return 0
11620         
11621                                         for idx in range(0,sp_vdb_len):
11622                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11623                                                         print sp_absx
11624                                                         print absx
11625                                                         print "\n!!!", x, "is not inside "+\
11626                                                                 vdb_path+"; aborting.\n"
11627                                                         return 0
11628         
11629                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11630                                         candidate_catpkgs.append(
11631                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11632         
11633                 newline=""
11634                 if (not "--quiet" in myopts):
11635                         newline="\n"
11636                 if settings["ROOT"] != "/":
11637                         writemsg_level(darkgreen(newline+ \
11638                                 ">>> Using system located in ROOT tree %s\n" % \
11639                                 settings["ROOT"]))
11640
11641                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11642                         not ("--quiet" in myopts):
11643                         writemsg_level(darkgreen(newline+\
11644                                 ">>> These are the packages that would be unmerged:\n"))
11645
11646                 # Preservation of order is required for --depclean and --prune so
11647                 # that dependencies are respected. Use all_selected to eliminate
11648                 # duplicate packages since the same package may be selected by
11649                 # multiple atoms.
11650                 pkgmap = []
11651                 all_selected = set()
11652                 for x in candidate_catpkgs:
11653                         # cycle through all our candidate deps and determine
11654                         # what will and will not get unmerged
11655                         try:
11656                                 mymatch = vartree.dbapi.match(x)
11657                         except portage.exception.AmbiguousPackageName, errpkgs:
11658                                 print "\n\n!!! The short ebuild name \"" + \
11659                                         x + "\" is ambiguous.  Please specify"
11660                                 print "!!! one of the following fully-qualified " + \
11661                                         "ebuild names instead:\n"
11662                                 for i in errpkgs[0]:
11663                                         print "    " + green(i)
11664                                 print
11665                                 sys.exit(1)
11666         
11667                         if not mymatch and x[0] not in "<>=~":
11668                                 mymatch = localtree.dep_match(x)
11669                         if not mymatch:
11670                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11671                                         (x, unmerge_action), noiselevel=-1)
11672                                 continue
11673
11674                         pkgmap.append(
11675                                 {"protected": set(), "selected": set(), "omitted": set()})
11676                         mykey = len(pkgmap) - 1
11677                         if unmerge_action=="unmerge":
11678                                         for y in mymatch:
11679                                                 if y not in all_selected:
11680                                                         pkgmap[mykey]["selected"].add(y)
11681                                                         all_selected.add(y)
11682                         elif unmerge_action == "prune":
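                                      # --prune keeps only the single best installed version (on a
                                      # slot collision the one with the highest counter, i.e. the
                                      # most recently installed, wins) and selects all other
                                      # matched versions for removal.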
11683                                 if len(mymatch) == 1:
11684                                         continue
11685                                 best_version = mymatch[0]
11686                                 best_slot = vartree.getslot(best_version)
11687                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11688                                 for mypkg in mymatch[1:]:
11689                                         myslot = vartree.getslot(mypkg)
11690                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11691                                         if (myslot == best_slot and mycounter > best_counter) or \
11692                                                 mypkg == portage.best([mypkg, best_version]):
11693                                                 if myslot == best_slot:
11694                                                         if mycounter < best_counter:
11695                                                                 # On slot collision, keep the one with the
11696                                                                 # highest counter since it is the most
11697                                                                 # recently installed.
11698                                                                 continue
11699                                                 best_version = mypkg
11700                                                 best_slot = myslot
11701                                                 best_counter = mycounter
11702                                 pkgmap[mykey]["protected"].add(best_version)
11703                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11704                                         if mypkg != best_version and mypkg not in all_selected)
11705                                 all_selected.update(pkgmap[mykey]["selected"])
11706                         else:
11707                                 # unmerge_action == "clean"
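                                      # --clean groups the installed versions by SLOT and protects
                                      # the most recently merged version in each slot (highest
                                      # counter) along with any versions not matched by the atom;
                                      # the remaining versions are selected.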
11708                                 slotmap={}
11709                                 for mypkg in mymatch:
11710                                         if unmerge_action == "clean":
11711                                                 myslot = localtree.getslot(mypkg)
11712                                         else:
11713                                                 # since we're pruning, we don't care about slots
11714                                                 # and put all the pkgs in together
11715                                                 myslot = 0
11716                                         if myslot not in slotmap:
11717                                                 slotmap[myslot] = {}
11718                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11719
11720                                 for mypkg in vartree.dbapi.cp_list(
11721                                         portage.dep_getkey(mymatch[0])):
11722                                         myslot = vartree.getslot(mypkg)
11723                                         if myslot not in slotmap:
11724                                                 slotmap[myslot] = {}
11725                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11726
11727                                 for myslot in slotmap:
11728                                         counterkeys = slotmap[myslot].keys()
11729                                         if not counterkeys:
11730                                                 continue
11731                                         counterkeys.sort()
11732                                         pkgmap[mykey]["protected"].add(
11733                                                 slotmap[myslot][counterkeys[-1]])
11734                                         del counterkeys[-1]
11735
11736                                         for counter in counterkeys[:]:
11737                                                 mypkg = slotmap[myslot][counter]
11738                                                 if mypkg not in mymatch:
11739                                                         counterkeys.remove(counter)
11740                                                         pkgmap[mykey]["protected"].add(
11741                                                                 slotmap[myslot][counter])
11742
11743                                         #be pretty and get them in order of merge:
11744                                         for ckey in counterkeys:
11745                                                 mypkg = slotmap[myslot][ckey]
11746                                                 if mypkg not in all_selected:
11747                                                         pkgmap[mykey]["selected"].add(mypkg)
11748                                                         all_selected.add(mypkg)
11749                                         # ok, now the last-merged package
11750                                         # is protected, and the rest are selected
11751                 numselected = len(all_selected)
11752                 if global_unmerge and not numselected:
11753                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11754                         return 0
11755         
11756                 if not numselected:
11757                         portage.writemsg_stdout(
11758                                 "\n>>> No packages selected for removal by " + \
11759                                 unmerge_action + "\n")
11760                         return 0
11761         finally:
11762                 if vdb_lock:
11763                         vartree.dbapi.flush_cache()
11764                         portage.locks.unlockdir(vdb_lock)
11765         
11766         from portage.sets.base import EditablePackageSet
11767         
11768         # generate a list of package sets that are directly or indirectly listed in "world",
11769         # as there is no persistent list of "installed" sets
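              # Nested @set references are expanded iteratively until no new sets
              # turn up; sets that the user explicitly asked to unmerge
              # (root_config.setconfig.active) are filtered out afterwards.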
11770         installed_sets = ["world"]
11771         stop = False
11772         pos = 0
11773         while not stop:
11774                 stop = True
11775                 pos = len(installed_sets)
11776                 for s in installed_sets[pos - 1:]:
11777                         if s not in sets:
11778                                 continue
11779                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11780                         if candidates:
11781                                 stop = False
11782                                 installed_sets += candidates
11783         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11784         del stop, pos
11785
11786         # Don't unmerge packages that are still listed in user-editable package
11787         # sets reachable from "world", since they would simply be remerged on the
11788         # next update of "world" or of the relevant package set.
11789         unknown_sets = set()
11790         for cp in xrange(len(pkgmap)):
11791                 for cpv in pkgmap[cp]["selected"].copy():
11792                         try:
11793                                 pkg = _pkg(cpv)
11794                         except KeyError:
11795                                 # It could have been uninstalled
11796                                 # by a concurrent process.
11797                                 continue
11798
11799                         if unmerge_action != "clean" and \
11800                                 root_config.root == "/" and \
11801                                 portage.match_from_list(
11802                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11803                                 msg = ("Not unmerging package %s since there is no valid " + \
11804                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11805                                 for line in textwrap.wrap(msg, 75):
11806                                         out.eerror(line)
11807                                 # adjust pkgmap so the display output is correct
11808                                 pkgmap[cp]["selected"].remove(cpv)
11809                                 all_selected.remove(cpv)
11810                                 pkgmap[cp]["protected"].add(cpv)
11811                                 continue
11812
11813                         parents = []
11814                         for s in installed_sets:
11815                                 # skip sets that the user requested to unmerge, and skip world 
11816                                 # unless we're unmerging a package set (as the package would be 
11817                                 # removed from "world" later on)
11818                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11819                                         continue
11820
11821                                 if s not in sets:
11822                                         if s in unknown_sets:
11823                                                 continue
11824                                         unknown_sets.add(s)
11825                                         out = portage.output.EOutput()
11826                                         out.eerror(("Unknown set '@%s' in " + \
11827                                                 "%svar/lib/portage/world_sets") % \
11828                                                 (s, root_config.root))
11829                                         continue
11830
11831                                 # only check instances of EditablePackageSet as other classes are generally used for
11832                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11833                                 # user can't do much about them anyway)
11834                                 if isinstance(sets[s], EditablePackageSet):
11835
11836                                         # This is derived from a snippet of code in the
11837                                         # depgraph._iter_atoms_for_pkg() method.
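                                              # The set only counts as a parent when no higher
                                              # installed version in a different slot also satisfies
                                              # the atom; such a version would keep the set satisfied
                                              # after this package is removed.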
11838                                         for atom in sets[s].iterAtomsForPackage(pkg):
11839                                                 inst_matches = vartree.dbapi.match(atom)
11840                                                 inst_matches.reverse() # descending order
11841                                                 higher_slot = None
11842                                                 for inst_cpv in inst_matches:
11843                                                         try:
11844                                                                 inst_pkg = _pkg(inst_cpv)
11845                                                         except KeyError:
11846                                                                 # It could have been uninstalled
11847                                                                 # by a concurrent process.
11848                                                                 continue
11849
11850                                                         if inst_pkg.cp != atom.cp:
11851                                                                 continue
11852                                                         if pkg >= inst_pkg:
11853                                                                 # This is descending order, and we're not
11854                                                                 # interested in any versions <= pkg given.
11855                                                                 break
11856                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11857                                                                 higher_slot = inst_pkg
11858                                                                 break
11859                                                 if higher_slot is None:
11860                                                         parents.append(s)
11861                                                         break
11862                         if parents:
11863                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11864                                 #print colorize("WARN", "but still listed in the following package sets:")
11865                                 #print "    %s\n" % ", ".join(parents)
11866                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11867                                 print colorize("WARN", "still referenced by the following package sets:")
11868                                 print "    %s\n" % ", ".join(parents)
11869                                 # adjust pkgmap so the display output is correct
11870                                 pkgmap[cp]["selected"].remove(cpv)
11871                                 all_selected.remove(cpv)
11872                                 pkgmap[cp]["protected"].add(cpv)
11873         
11874         del installed_sets
11875
11876         numselected = len(all_selected)
11877         if not numselected:
11878                 writemsg_level(
11879                         "\n>>> No packages selected for removal by " + \
11880                         unmerge_action + "\n")
11881                 return 0
11882
11883         # Unmerge order only matters in some cases
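              # (e.g. --depclean and --prune).  When it doesn't, collapse the
              # per-atom entries so that all versions of the same package end up
              # in a single pkgmap entry, sorted by ${CATEGORY}/${PN} for display.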
11884         if not ordered:
11885                 unordered = {}
11886                 for d in pkgmap:
11887                         selected = d["selected"]
11888                         if not selected:
11889                                 continue
11890                         cp = portage.cpv_getkey(iter(selected).next())
11891                         cp_dict = unordered.get(cp)
11892                         if cp_dict is None:
11893                                 cp_dict = {}
11894                                 unordered[cp] = cp_dict
11895                                 for k in d:
11896                                         cp_dict[k] = set()
11897                         for k, v in d.iteritems():
11898                                 cp_dict[k].update(v)
11899                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11900
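              # For each entry, list the other installed versions that are not being
              # removed under "omitted", warn when a package from the system profile
              # is about to be removed entirely, and print the selected/protected/
              # omitted preview.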
11901         for x in xrange(len(pkgmap)):
11902                 selected = pkgmap[x]["selected"]
11903                 if not selected:
11904                         continue
11905                 for mytype, mylist in pkgmap[x].iteritems():
11906                         if mytype == "selected":
11907                                 continue
11908                         mylist.difference_update(all_selected)
11909                 cp = portage.cpv_getkey(iter(selected).next())
11910                 for y in localtree.dep_match(cp):
11911                         if y not in pkgmap[x]["omitted"] and \
11912                                 y not in pkgmap[x]["selected"] and \
11913                                 y not in pkgmap[x]["protected"] and \
11914                                 y not in all_selected:
11915                                 pkgmap[x]["omitted"].add(y)
11916                 if global_unmerge and not pkgmap[x]["selected"]:
11917                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11918                         continue
11919                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11920                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11921                                 "'%s' is part of your system profile.\n" % cp),
11922                                 level=logging.WARNING, noiselevel=-1)
11923                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11924                                 "be damaging to your system.\n\n"),
11925                                 level=logging.WARNING, noiselevel=-1)
11926                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11927                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11928                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11929                 if not quiet:
11930                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11931                 else:
11932                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11933                 for mytype in ["selected","protected","omitted"]:
11934                         if not quiet:
11935                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11936                         if pkgmap[x][mytype]:
11937                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11938                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
11939                                 for pn, ver, rev in sorted_pkgs:
11940                                         if rev == "r0":
11941                                                 myversion = ver
11942                                         else:
11943                                                 myversion = ver + "-" + rev
11944                                         if mytype == "selected":
11945                                                 writemsg_level(
11946                                                         colorize("UNMERGE_WARN", myversion + " "),
11947                                                         noiselevel=-1)
11948                                         else:
11949                                                 writemsg_level(
11950                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
11951                         else:
11952                                 writemsg_level("none ", noiselevel=-1)
11953                         if not quiet:
11954                                 writemsg_level("\n", noiselevel=-1)
11955                 if quiet:
11956                         writemsg_level("\n", noiselevel=-1)
11957
11958         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11959                 " packages are slated for removal.\n")
11960         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11961                         " and " + colorize("GOOD", "'omitted'") + \
11962                         " packages will not be removed.\n\n")
11963
11964         if "--pretend" in myopts:
11965                 #we're done... return
11966                 return 0
11967         if "--ask" in myopts:
11968                 if userquery("Would you like to unmerge these packages?")=="No":
11969                         # enter pretend mode for correct formatting of results
11970                         myopts["--pretend"] = True
11971                         print
11972                         print "Quitting."
11973                         print
11974                         return 0
11975         #the real unmerging begins, after a short delay....
11976         if clean_delay and not autoclean:
11977                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11978
11979         for x in xrange(len(pkgmap)):
11980                 for y in pkgmap[x]["selected"]:
11981                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11982                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11983                         mysplit = y.split("/")
11984                         #unmerge...
11985                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11986                                 mysettings, unmerge_action not in ["clean","prune"],
11987                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11988                                 scheduler=scheduler)
11989
11990                         if retval != os.EX_OK:
11991                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11992                                 if raise_on_error:
11993                                         raise UninstallFailure(retval)
11994                                 sys.exit(retval)
11995                         else:
11996                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
11997                                         sets["world"].cleanPackage(vartree.dbapi, y)
11998                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
11999         if clean_world and hasattr(sets["world"], "remove"):
12000                 for s in root_config.setconfig.active:
12001                         sets["world"].remove(SETPREFIX+s)
12002         return 1
12003
12004 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12005
12006         if os.path.exists("/usr/bin/install-info"):
12007                 out = portage.output.EOutput()
12008                 regen_infodirs=[]
12009                 for z in infodirs:
12010                         if z=='':
12011                                 continue
12012                         inforoot=normpath(root+z)
12013                         if os.path.isdir(inforoot):
12014                                 infomtime = long(os.stat(inforoot).st_mtime)
12015                                 if inforoot not in prev_mtimes or \
12016                                         prev_mtimes[inforoot] != infomtime:
12017                                                 regen_infodirs.append(inforoot)
12018
12019                 if not regen_infodirs:
12020                         portage.writemsg_stdout("\n")
12021                         out.einfo("GNU info directory index is up-to-date.")
12022                 else:
12023                         portage.writemsg_stdout("\n")
12024                         out.einfo("Regenerating GNU info directory index...")
12025
12026                         dir_extensions = ("", ".gz", ".bz2")
12027                         icount=0
12028                         badcount=0
12029                         errmsg = ""
12030                         for inforoot in regen_infodirs:
12031                                 if inforoot=='':
12032                                         continue
12033
12034                                 if not os.path.isdir(inforoot) or \
12035                                         not os.access(inforoot, os.W_OK):
12036                                         continue
12037
12038                                 file_list = os.listdir(inforoot)
12039                                 file_list.sort()
12040                                 dir_file = os.path.join(inforoot, "dir")
12041                                 moved_old_dir = False
12042                                 processed_count = 0
12043                                 for x in file_list:
12044                                         if x.startswith(".") or \
12045                                                 os.path.isdir(os.path.join(inforoot, x)):
12046                                                 continue
12047                                         if x.startswith("dir"):
12048                                                 skip = False
12049                                                 for ext in dir_extensions:
12050                                                         if x == "dir" + ext or \
12051                                                                 x == "dir" + ext + ".old":
12052                                                                 skip = True
12053                                                                 break
12054                                                 if skip:
12055                                                         continue
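                                              # Before the first info file in this directory is
                                              # processed, move any existing dir index (and its
                                              # .gz/.bz2 variants) out of the way so install-info
                                              # rebuilds it; it is restored below if no new index
                                              # gets generated.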
12056                                         if processed_count == 0:
12057                                                 for ext in dir_extensions:
12058                                                         try:
12059                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12060                                                                 moved_old_dir = True
12061                                                         except EnvironmentError, e:
12062                                                                 if e.errno != errno.ENOENT:
12063                                                                         raise
12064                                                                 del e
12065                                         processed_count += 1
12066                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12067                                         existsstr="already exists, for file `"
12068                                         if myso!="":
12069                                                 if re.search(existsstr,myso):
12070                                                         # Already exists... Don't increment the count for this.
12071                                                         pass
12072                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12073                                                         # This info file doesn't contain a DIR-header: install-info produces this
12074                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12075                                                         # Don't increment the count for this.
12076                                                         pass
12077                                                 else:
12078                                                         badcount=badcount+1
12079                                                         errmsg += myso + "\n"
12080                                         icount=icount+1
12081
12082                                 if moved_old_dir and not os.path.exists(dir_file):
12083                                         # We didn't generate a new dir file, so put the old file
12084                                         # back where it was originally found.
12085                                         for ext in dir_extensions:
12086                                                 try:
12087                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12088                                                 except EnvironmentError, e:
12089                                                         if e.errno != errno.ENOENT:
12090                                                                 raise
12091                                                         del e
12092
12093                                 # Clean up dir.old cruft so that it doesn't prevent
12094                                 # unmerging of otherwise empty directories.
12095                                 for ext in dir_extensions:
12096                                         try:
12097                                                 os.unlink(dir_file + ext + ".old")
12098                                         except EnvironmentError, e:
12099                                                 if e.errno != errno.ENOENT:
12100                                                         raise
12101                                                 del e
12102
12103                                 #update mtime so we can potentially avoid regenerating.
12104                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12105
12106                         if badcount:
12107                                 out.eerror("Processed %d info files; %d errors." % \
12108                                         (icount, badcount))
12109                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12110                         else:
12111                                 if icount > 0:
12112                                         out.einfo("Processed %d info files." % (icount,))
12113
12114
12115 def display_news_notification(root_config, myopts):
12116         target_root = root_config.root
12117         trees = root_config.trees
12118         settings = trees["vartree"].settings
12119         portdb = trees["porttree"].dbapi
12120         vardb = trees["vartree"].dbapi
12121         NEWS_PATH = os.path.join("metadata", "news")
12122         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12123         newsReaderDisplay = False
12124         update = "--pretend" not in myopts
12125
12126         for repo in portdb.getRepositories():
12127                 unreadItems = checkUpdatedNewsItems(
12128                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12129                 if unreadItems:
12130                         if not newsReaderDisplay:
12131                                 newsReaderDisplay = True
12132                                 print
12133                         print colorize("WARN", " * IMPORTANT:"),
12134                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12135                         
12136         
12137         if newsReaderDisplay:
12138                 print colorize("WARN", " *"),
12139                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12140                 print
12141
12142 def display_preserved_libs(vardbapi):
12143         MAX_DISPLAY = 3
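              # Limit how many consumers are listed per preserved library before
              # the remainder is summarized as a count.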
12144
12145         # Ensure the registry is consistent with existing files.
12146         vardbapi.plib_registry.pruneNonExisting()
12147
12148         if vardbapi.plib_registry.hasEntries():
12149                 print
12150                 print colorize("WARN", "!!!") + " existing preserved libs:"
12151                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12152                 linkmap = vardbapi.linkmap
12153                 consumer_map = {}
12154                 owners = {}
12155                 linkmap_broken = False
12156
12157                 try:
12158                         linkmap.rebuild()
12159                 except portage.exception.CommandNotFound, e:
12160                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12161                                 level=logging.ERROR, noiselevel=-1)
12162                         del e
12163                         linkmap_broken = True
12164                 else:
12165                         search_for_owners = set()
12166                         for cpv in plibdata:
12167                                 internal_plib_keys = set(linkmap._obj_key(f) \
12168                                         for f in plibdata[cpv])
12169                                 for f in plibdata[cpv]:
12170                                         if f in consumer_map:
12171                                                 continue
12172                                         consumers = []
12173                                         for c in linkmap.findConsumers(f):
12174                                                 # Filter out any consumers that are also preserved libs
12175                                                 # belonging to the same package as the provider.
12176                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12177                                                         consumers.append(c)
12178                                         consumers.sort()
12179                                         consumer_map[f] = consumers
12180                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12181
12182                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12183
12184                 for cpv in plibdata:
12185                         print colorize("WARN", ">>>") + " package: %s" % cpv
12186                         samefile_map = {}
12187                         for f in plibdata[cpv]:
12188                                 obj_key = linkmap._obj_key(f)
12189                                 alt_paths = samefile_map.get(obj_key)
12190                                 if alt_paths is None:
12191                                         alt_paths = set()
12192                                         samefile_map[obj_key] = alt_paths
12193                                 alt_paths.add(f)
12194
12195                         for alt_paths in samefile_map.itervalues():
12196                                 alt_paths = sorted(alt_paths)
12197                                 for p in alt_paths:
12198                                         print colorize("WARN", " * ") + " - %s" % (p,)
12199                                 f = alt_paths[0]
12200                                 consumers = consumer_map.get(f, [])
12201                                 for c in consumers[:MAX_DISPLAY]:
12202                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12203                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12204                                 if len(consumers) == MAX_DISPLAY + 1:
12205                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12206                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12207                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12208                                 elif len(consumers) > MAX_DISPLAY:
12209                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12210                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12211
12212
12213 def _flush_elog_mod_echo():
12214         """
12215         Dump the mod_echo output now so that our other
12216         notifications are shown last.
12217         @rtype: bool
12218         @returns: True if messages were shown, False otherwise.
12219         """
12220         messages_shown = False
12221         try:
12222                 from portage.elog import mod_echo
12223         except ImportError:
12224                 pass # happens during downgrade to a version without the module
12225         else:
12226                 messages_shown = bool(mod_echo._items)
12227                 mod_echo.finalize()
12228         return messages_shown
12229
12230 def post_emerge(root_config, myopts, mtimedb, retval):
12231         """
12232         Misc. things to run at the end of a merge session.
12233         
12234         Update Info Files
12235         Update Config Files
12236         Update News Items
12237         Commit mtimeDB
12238         Display preserved libs warnings
12239         Exit Emerge
12240
12241         @param root_config: Configuration for the target ROOT, providing its package databases
12242         @type trees: dict
12243         @param mtimedb: The mtimeDB to store data needed across merge invocations
12244         @type mtimedb: MtimeDB class instance
12245         @param retval: Emerge's return value
12246         @type retval: Int
12247         @rtype: None
12248         @returns: None.  This function does not return normally; it
12249         finishes by calling sys.exit(retval).
12250         """
12251
12252         target_root = root_config.root
12253         trees = { target_root : root_config.trees }
12254         vardbapi = trees[target_root]["vartree"].dbapi
12255         settings = vardbapi.settings
12256         info_mtimes = mtimedb["info"]
12257
12258         # Load the most current variables from ${ROOT}/etc/profile.env
12259         settings.unlock()
12260         settings.reload()
12261         settings.regenerate()
12262         settings.lock()
12263
12264         config_protect = settings.get("CONFIG_PROTECT","").split()
12265         infodirs = settings.get("INFOPATH","").split(":") + \
12266                 settings.get("INFODIR","").split(":")
12267
12268         os.chdir("/")
12269
12270         if retval == os.EX_OK:
12271                 exit_msg = " *** exiting successfully."
12272         else:
12273                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12274         emergelog("notitles" not in settings.features, exit_msg)
12275
12276         _flush_elog_mod_echo()
12277
12278         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12279         if "--pretend" in myopts or (counter_hash is not None and \
12280                 counter_hash == vardbapi._counter_hash()):
12281                 display_news_notification(root_config, myopts)
12282                 # If vdb state has not changed then there's nothing else to do.
12283                 sys.exit(retval)
12284
12285         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12286         portage.util.ensure_dirs(vdb_path)
12287         vdb_lock = None
12288         if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
12289                 vdb_lock = portage.locks.lockdir(vdb_path)
12290
12291         if vdb_lock:
12292                 try:
12293                         if "noinfo" not in settings.features:
12294                                 chk_updated_info_files(target_root,
12295                                         infodirs, info_mtimes, retval)
12296                         mtimedb.commit()
12297                 finally:
12298                         if vdb_lock:
12299                                 portage.locks.unlockdir(vdb_lock)
12300
12301         chk_updated_cfg_files(target_root, config_protect)
12302         
12303         display_news_notification(root_config, myopts)
12304         if retval in (None, os.EX_OK) or "--pretend" not in myopts:
12305                 display_preserved_libs(vardbapi)
12306
12307         sys.exit(retval)
12308
12309
12310 def chk_updated_cfg_files(target_root, config_protect):
12311         if config_protect:
12312                 #number of directories with some protect files in them
12313                 procount=0
12314                 for x in config_protect:
12315                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12316                         if not os.access(x, os.W_OK):
12317                                 # Avoid Permission denied errors generated
12318                                 # later by `find`.
12319                                 continue
12320                         try:
12321                                 mymode = os.lstat(x).st_mode
12322                         except OSError:
12323                                 continue
12324                         if stat.S_ISLNK(mymode):
12325                                 # We want to treat it like a directory if it
12326                                 # is a symlink to an existing directory.
12327                                 try:
12328                                         real_mode = os.stat(x).st_mode
12329                                         if stat.S_ISDIR(real_mode):
12330                                                 mymode = real_mode
12331                                 except OSError:
12332                                         pass
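                              # Build the find command: directories are scanned recursively for
                              # ._cfg????_* entries (pruning hidden directories), while a single
                              # protected file is checked only in its own directory at -maxdepth 1.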
12333                         if stat.S_ISDIR(mymode):
12334                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12335                         else:
12336                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12337                                         os.path.split(x.rstrip(os.path.sep))
12338                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12339                         a = commands.getstatusoutput(mycommand)
12340                         if a[0] != 0:
12341                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12342                                 sys.stderr.flush()
12343                                 # Show the error message alone, sending stdout to /dev/null.
12344                                 os.system(mycommand + " 1>/dev/null")
12345                         else:
12346                                 files = a[1].split('\0')
12347                                 # split always produces an empty string as the last element
12348                                 if files and not files[-1]:
12349                                         del files[-1]
12350                                 if files:
12351                                         procount += 1
12352                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12353                                         if stat.S_ISDIR(mymode):
12354                                                  print "%d config files in '%s' need updating." % \
12355                                                         (len(files), x)
12356                                         else:
12357                                                  print "config file '%s' needs updating." % x
12358
12359                 if procount:
12360                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12361                                 " section of the " + bold("emerge")
12362                         print " "+yellow("*")+" man page to learn how to update config files."
12363
12364 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12365         update=False):
12366         """
12367         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12368         Returns the number of unread (yet relevant) items.
12369         
12370         @param portdb: a portage tree database
12371         @type portdb: portdbapi
12372         @param vardb: an installed package database
12373         @type vardb: vardbapi
12374         @param NEWS_PATH:
12375         @type NEWS_PATH:
12376         @param UNREAD_PATH:
12377         @type UNREAD_PATH:
12378         @param repo_id:
12379         @type repo_id:
12380         @rtype: Integer
12381         @returns:
12382         1.  The number of unread but relevant news items.
12383         
12384         """
12385         from portage.news import NewsManager
12386         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12387         return manager.getUnreadItems( repo_id, update=update )
12388
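      # Insert the category just before the package name, preserving any leading
      # operator, e.g. insert_category_into_atom(">=foo-1.0", "sys-apps") would
      # return ">=sys-apps/foo-1.0"; returns None if the atom has no word characters.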
12389 def insert_category_into_atom(atom, category):
12390         alphanum = re.search(r'\w', atom)
12391         if alphanum:
12392                 ret = atom[:alphanum.start()] + "%s/" % category + \
12393                         atom[alphanum.start():]
12394         else:
12395                 ret = None
12396         return ret
12397
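      # Atoms given without a category get a dummy "cat/" prefix so that
      # portage.isvalidatom() can still validate the rest of the atom.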
12398 def is_valid_package_atom(x):
12399         if "/" not in x:
12400                 alphanum = re.search(r'\w', x)
12401                 if alphanum:
12402                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12403         return portage.isvalidatom(x)
12404
12405 def show_blocker_docs_link():
12406         print
12407         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12408         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12409         print
12410         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12411         print
12412
12413 def show_mask_docs():
12414         print "For more information, see the MASKED PACKAGES section in the emerge"
12415         print "man page or refer to the Gentoo Handbook."
12416
12417 def action_sync(settings, trees, mtimedb, myopts, myaction):
12418         xterm_titles = "notitles" not in settings.features
12419         emergelog(xterm_titles, " === sync")
12420         myportdir = settings.get("PORTDIR", None)
12421         out = portage.output.EOutput()
12422         if not myportdir:
12423                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12424                 sys.exit(1)
12425         if myportdir[-1]=="/":
12426                 myportdir=myportdir[:-1]
12427         try:
12428                 st = os.stat(myportdir)
12429         except OSError:
12430                 st = None
12431         if st is None:
12432                 print ">>>",myportdir,"not found, creating it."
12433                 os.makedirs(myportdir,0755)
12434                 st = os.stat(myportdir)
12435
12436         spawn_kwargs = {}
12437         spawn_kwargs["env"] = settings.environ()
12438         if 'usersync' in settings.features and \
12439                 portage.data.secpass >= 2 and \
12440                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12441                 st.st_gid != os.getgid() and st.st_mode & 0070):
12442                 try:
12443                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12444                 except KeyError:
12445                         pass
12446                 else:
12447                         # Drop privileges when syncing, in order to match
12448                         # existing uid/gid settings.
12449                         spawn_kwargs["uid"]    = st.st_uid
12450                         spawn_kwargs["gid"]    = st.st_gid
12451                         spawn_kwargs["groups"] = [st.st_gid]
12452                         spawn_kwargs["env"]["HOME"] = homedir
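                              # Start from a umask that permits group write, but mask group
                              # write back out if the existing tree is not group-writable.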
12453                         umask = 0002
12454                         if not st.st_mode & 0020:
12455                                 umask = umask | 0020
12456                         spawn_kwargs["umask"] = umask
12457
12458         syncuri = settings.get("SYNC", "").strip()
12459         if not syncuri:
12460                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12461                         noiselevel=-1, level=logging.ERROR)
12462                 return 1
12463
12464         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12465         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12466
12467         os.umask(0022)
12468         dosyncuri = syncuri
12469         updatecache_flg = False
12470         if myaction == "metadata":
12471                 print "skipping sync"
12472                 updatecache_flg = True
12473         elif ".git" in vcs_dirs:
12474                 # Update existing git repository, and ignore the syncuri. We are
12475                 # going to trust the user and assume that the user is in the branch
12476                 # that he/she wants updated. We'll let the user manage branches with
12477                 # git directly.
12478                 if portage.process.find_binary("git") is None:
12479                         msg = ["Command not found: git",
12480                         "Type \"emerge dev-util/git\" to enable git support."]
12481                         for l in msg:
12482                                 writemsg_level("!!! %s\n" % l,
12483                                         level=logging.ERROR, noiselevel=-1)
12484                         return 1
12485                 msg = ">>> Starting git pull in %s..." % myportdir
12486                 emergelog(xterm_titles, msg )
12487                 writemsg_level(msg + "\n")
12488                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12489                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12490                 if exitcode != os.EX_OK:
12491                         msg = "!!! git pull error in %s." % myportdir
12492                         emergelog(xterm_titles, msg)
12493                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12494                         return exitcode
12495                 msg = ">>> Git pull in %s successful" % myportdir
12496                 emergelog(xterm_titles, msg)
12497                 writemsg_level(msg + "\n")
12498                 exitcode = git_sync_timestamps(settings, myportdir)
12499                 if exitcode == os.EX_OK:
12500                         updatecache_flg = True
12501         elif syncuri[:8]=="rsync://":
12502                 for vcs_dir in vcs_dirs:
12503                         writemsg_level(("!!! %s appears to be under revision " + \
12504                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12505                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12506                         return 1
12507                 if not os.path.exists("/usr/bin/rsync"):
12508                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12509                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12510                         sys.exit(1)
12511                 mytimeout=180
12512
12513                 rsync_opts = []
12514                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12515                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12516                         rsync_opts.extend([
12517                                 "--recursive",    # Recurse directories
12518                                 "--links",        # Consider symlinks
12519                                 "--safe-links",   # Ignore links outside of tree
12520                                 "--perms",        # Preserve permissions
12521                                 "--times",        # Preserve mod times
12522                                 "--compress",     # Compress the data transmitted
12523                                 "--force",        # Force deletion on non-empty dirs
12524                                 "--whole-file",   # Don't do block transfers, only entire files
12525                                 "--delete",       # Delete files that aren't in the master tree
12526                                 "--stats",        # Show final statistics about what was transferred
12527                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12528                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12529                                 "--exclude=/local",       # Exclude local     from consideration
12530                                 "--exclude=/packages",    # Exclude packages  from consideration
12531                         ])
12532
12533                 else:
12534                         # The below validation is not needed when using the above hardcoded
12535                         # defaults.
12536
12537                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12538                         rsync_opts.extend(
12539                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12540                         for opt in ("--recursive", "--times"):
12541                                 if opt not in rsync_opts:
12542                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12543                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12544                                         rsync_opts.append(opt)
12545         
12546                         for exclude in ("distfiles", "local", "packages"):
12547                                 opt = "--exclude=/%s" % exclude
12548                                 if opt not in rsync_opts:
12549                                         portage.writemsg(yellow("WARNING:") + \
12550                                         " adding required option %s not included in "  % opt + \
12551                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12552                                         rsync_opts.append(opt)
12553         
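                              # For the official *.gentoo.org/gentoo-portage mirrors, make sure
                              # --timeout, --compress and --whole-file are present even when the
                              # user's PORTAGE_RSYNC_OPTS omits them.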
12554                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12555                                 def rsync_opt_startswith(opt_prefix):
12556                                         for x in rsync_opts:
12557                                                 if x.startswith(opt_prefix):
12558                                                         return True
12559                                         return False
12560
12561                                 if not rsync_opt_startswith("--timeout="):
12562                                         rsync_opts.append("--timeout=%d" % mytimeout)
12563
12564                                 for opt in ("--compress", "--whole-file"):
12565                                         if opt not in rsync_opts:
12566                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12567                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12568                                                 rsync_opts.append(opt)
12569
12570                 if "--quiet" in myopts:
12571                         rsync_opts.append("--quiet")    # Shut up a lot
12572                 else:
12573                         rsync_opts.append("--verbose")  # Print filelist
12574
12575                 if "--verbose" in myopts:
12576                         rsync_opts.append("--progress")  # Progress meter for each file
12577
12578                 if "--debug" in myopts:
12579                         rsync_opts.append("--checksum") # Force checksum on all files
12580
12581                 # Real local timestamp file.
12582                 servertimestampfile = os.path.join(
12583                         myportdir, "metadata", "timestamp.chk")
12584
12585                 content = portage.util.grabfile(servertimestampfile)
12586                 mytimestamp = 0
12587                 if content:
12588                         try:
12589                                 mytimestamp = time.mktime(time.strptime(content[0],
12590                                         "%a, %d %b %Y %H:%M:%S +0000"))
12591                         except (OverflowError, ValueError):
12592                                 pass
12593                 del content
12594
12595                 try:
12596                         rsync_initial_timeout = \
12597                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12598                 except ValueError:
12599                         rsync_initial_timeout = 15
12600
12601                 try:
12602                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12603                 except SystemExit, e:
12604                         raise # Needed else can't exit
12605                 except:
12606                         maxretries=3 #default number of retries
12607
12608                 retries=0
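                      # Split the rsync:// URI into its optional user@ prefix, the hostname
                      # and the optional :port via the capture groups of re.split().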
12609                 user_name, hostname, port = re.split(
12610                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12611                 if port is None:
12612                         port=""
12613                 if user_name is None:
12614                         user_name=""
12615                 updatecache_flg=True
12616                 all_rsync_opts = set(rsync_opts)
12617                 extra_rsync_opts = shlex.split(
12618                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12619                 all_rsync_opts.update(extra_rsync_opts)
12620                 family = socket.AF_INET
12621                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12622                         family = socket.AF_INET
12623                 elif socket.has_ipv6 and \
12624                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12625                         family = socket.AF_INET6
12626                 ips=[]
12627                 SERVER_OUT_OF_DATE = -1
12628                 EXCEEDED_MAX_RETRIES = -2
12629                 while (1):
12630                         if ips:
12631                                 del ips[0]
12632                         if ips==[]:
12633                                 try:
12634                                         for addrinfo in socket.getaddrinfo(
12635                                                 hostname, None, family, socket.SOCK_STREAM):
12636                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12637                                                         # IPv6 addresses need to be enclosed in square brackets
12638                                                         ips.append("[%s]" % addrinfo[4][0])
12639                                                 else:
12640                                                         ips.append(addrinfo[4][0])
12641                                         from random import shuffle
12642                                         shuffle(ips)
12643                                 except SystemExit, e:
12644                                         raise # Needed else can't exit
12645                                 except Exception, e:
12646                                         print "Notice:",str(e)
12647                                         dosyncuri=syncuri
12648
12649                         if ips:
12650                                 try:
12651                                         dosyncuri = syncuri.replace(
12652                                                 "//" + user_name + hostname + port + "/",
12653                                                 "//" + user_name + ips[0] + port + "/", 1)
12654                                 except SystemExit, e:
12655                                         raise # Needed else can't exit
12656                                 except Exception, e:
12657                                         print "Notice:",str(e)
12658                                         dosyncuri=syncuri
12659
12660                         if (retries==0):
12661                                 if "--ask" in myopts:
12662                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12663                                                 print
12664                                                 print "Quitting."
12665                                                 print
12666                                                 sys.exit(0)
12667                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12668                                 if "--quiet" not in myopts:
12669                                         print ">>> Starting rsync with "+dosyncuri+"..."
12670                         else:
12671                                 emergelog(xterm_titles,
12672                                         ">>> Starting retry %d of %d with %s" % \
12673                                                 (retries,maxretries,dosyncuri))
12674                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12675
12676                         if mytimestamp != 0 and "--quiet" not in myopts:
12677                                 print ">>> Checking server timestamp ..."
12678
12679                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12680
12681                         if "--debug" in myopts:
12682                                 print rsynccommand
12683
12684                         exitcode = os.EX_OK
12685                         servertimestamp = 0
12686                         # Even if there's no timestamp available locally, fetch the
12687                         # timestamp anyway as an initial probe to verify that the server is
12688                         # responsive.  This protects us from hanging indefinitely on a
12689                         # connection attempt to an unresponsive server which rsync's
12690                         # --timeout option does not prevent.
12691                         if True:
12692                                 # Temporary file for remote server timestamp comparison.
12693                                 from tempfile import mkstemp
12694                                 fd, tmpservertimestampfile = mkstemp()
12695                                 os.close(fd)
12696                                 mycommand = rsynccommand[:]
12697                                 mycommand.append(dosyncuri.rstrip("/") + \
12698                                         "/metadata/timestamp.chk")
12699                                 mycommand.append(tmpservertimestampfile)
12700                                 content = None
12701                                 mypids = []
12702                                 try:
12703                                         def timeout_handler(signum, frame):
12704                                                 raise portage.exception.PortageException("timed out")
12705                                         signal.signal(signal.SIGALRM, timeout_handler)
12706                                         # Timeout here in case the server is unresponsive.  The
12707                                         # --timeout rsync option doesn't apply to the initial
12708                                         # connection attempt.
12709                                         if rsync_initial_timeout:
12710                                                 signal.alarm(rsync_initial_timeout)
12711                                         try:
12712                                                 mypids.extend(portage.process.spawn(
12713                                                         mycommand, env=settings.environ(), returnpid=True))
12714                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12715                                                 content = portage.grabfile(tmpservertimestampfile)
12716                                         finally:
12717                                                 if rsync_initial_timeout:
12718                                                         signal.alarm(0)
12719                                                 try:
12720                                                         os.unlink(tmpservertimestampfile)
12721                                                 except OSError:
12722                                                         pass
12723                                 except portage.exception.PortageException, e:
12724                                         # timed out
12725                                         print e
12726                                         del e
12727                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12728                                                 os.kill(mypids[0], signal.SIGTERM)
12729                                                 os.waitpid(mypids[0], 0)
12730                                         # This is the same code rsync uses for timeout.
12731                                         exitcode = 30
12732                                 else:
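                                              # The exit status above is the raw 16-bit value from
                                              # os.waitpid(): a nonzero low byte means the child was
                                              # killed by a signal, otherwise the high byte carries
                                              # the real exit code, so normalize it here.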
12733                                         if exitcode != os.EX_OK:
12734                                                 if exitcode & 0xff:
12735                                                         exitcode = (exitcode & 0xff) << 8
12736                                                 else:
12737                                                         exitcode = exitcode >> 8
12738                                 if mypids:
12739                                         portage.process.spawned_pids.remove(mypids[0])
12740                                 if content:
12741                                         try:
12742                                                 servertimestamp = time.mktime(time.strptime(
12743                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12744                                         except (OverflowError, ValueError):
12745                                                 pass
12746                                 del mycommand, mypids, content
12747                         if exitcode == os.EX_OK:
12748                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12749                                         emergelog(xterm_titles,
12750                                                 ">>> Cancelling sync -- Already current.")
12751                                         print
12752                                         print ">>>"
12753                                         print ">>> Timestamps on the server and in the local repository are the same."
12754                                         print ">>> Cancelling all further sync action. You are already up to date."
12755                                         print ">>>"
12756                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12757                                         print ">>>"
12758                                         print
12759                                         sys.exit(0)
12760                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12761                                         emergelog(xterm_titles,
12762                                                 ">>> Server out of date: %s" % dosyncuri)
12763                                         print
12764                                         print ">>>"
12765                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12766                                         print ">>>"
12767                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12768                                         print ">>>"
12769                                         print
12770                                         exitcode = SERVER_OUT_OF_DATE
12771                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12772                                         # actual sync
12773                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12774                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
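                                              # These rsync exit codes are treated as final, whether
                                              # success or an error a retry is unlikely to fix; any
                                              # other code falls through to the retry logic below.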
12775                                         if exitcode in [0,1,3,4,11,14,20,21]:
12776                                                 break
12777                         elif exitcode in [1,3,4,11,14,20,21]:
12778                                 break
12779                         else:
12780                                 # Code 2 indicates protocol incompatibility, which is expected
12781                                 # for servers with protocol < 29 that don't support
12782                                 # --prune-empty-directories.  Retry for a server that supports
12783                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12784                                 pass
12785
12786                         retries=retries+1
12787
12788                         if retries<=maxretries:
12789                                 print ">>> Retrying..."
12790                                 time.sleep(11)
12791                         else:
12792                                 # over retries
12793                                 # exit loop
12794                                 updatecache_flg=False
12795                                 exitcode = EXCEEDED_MAX_RETRIES
12796                                 break
12797
12798                 if (exitcode==0):
12799                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12800                 elif exitcode == SERVER_OUT_OF_DATE:
12801                         sys.exit(1)
12802                 elif exitcode == EXCEEDED_MAX_RETRIES:
12803                         sys.stderr.write(
12804                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12805                         sys.exit(1)
12806                 elif (exitcode>0):
12807                         msg = []
12808                         if exitcode==1:
12809                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12810                                 msg.append("that your SYNC statement is proper.")
12811                                 msg.append("SYNC=" + settings["SYNC"])
12812                         elif exitcode==11:
12813                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12814                                 msg.append("this means your disk is full, but can be caused by corruption")
12815                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12816                                 msg.append("and try again after the problem has been fixed.")
12817                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12818                         elif exitcode==20:
12819                                 msg.append("Rsync was killed before it finished.")
12820                         else:
12821                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12822                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12823                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12824                                 msg.append("temporary problem unless complications exist with your network")
12825                                 msg.append("(and possibly your system's filesystem) configuration.")
12826                         for line in msg:
12827                                 out.eerror(line)
12828                         sys.exit(exitcode)
12829         elif syncuri[:6]=="cvs://":
12830                 if not os.path.exists("/usr/bin/cvs"):
12831                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12832                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12833                         sys.exit(1)
12834                 cvsroot=syncuri[6:]
12835                 cvsdir=os.path.dirname(myportdir)
12836                 if not os.path.exists(myportdir+"/CVS"):
12837                         #initial checkout
12838                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12839                         if os.path.exists(cvsdir+"/gentoo-x86"):
12840                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12841                                 sys.exit(1)
12842                         try:
12843                                 os.rmdir(myportdir)
12844                         except OSError, e:
12845                                 if e.errno != errno.ENOENT:
12846                                         sys.stderr.write(
12847                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12848                                         sys.exit(1)
12849                                 del e
12850                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12851                                 print "!!! cvs checkout error; exiting."
12852                                 sys.exit(1)
12853                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12854                 else:
12855                         #cvs update
12856                         print ">>> Starting cvs update with "+syncuri+"..."
12857                         retval = portage.process.spawn_bash(
12858                                 "cd %s; cvs -z0 -q update -dP" % \
12859                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12860                         if retval != os.EX_OK:
12861                                 sys.exit(retval)
12862                 dosyncuri = syncuri
12863         else:
12864                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12865                         noiselevel=-1, level=logging.ERROR)
12866                 return 1
12867
12868         if updatecache_flg and  \
12869                 myaction != "metadata" and \
12870                 "metadata-transfer" not in settings.features:
12871                 updatecache_flg = False
12872
12873         # Reload the whole config from scratch.
12874         settings, trees, mtimedb = load_emerge_config(trees=trees)
12875         root_config = trees[settings["ROOT"]]["root_config"]
12876         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12877
12878         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12879                 action_metadata(settings, portdb, myopts)
12880
12881         if portage._global_updates(trees, mtimedb["updates"]):
12882                 mtimedb.commit()
12883                 # Reload the whole config from scratch.
12884                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12885                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12886                 root_config = trees[settings["ROOT"]]["root_config"]
12887
12888         mybestpv = portdb.xmatch("bestmatch-visible",
12889                 portage.const.PORTAGE_PACKAGE_ATOM)
12890         mypvs = portage.best(
12891                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12892                 portage.const.PORTAGE_PACKAGE_ATOM))
12893
12894         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12895
12896         if myaction != "metadata":
12897                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12898                         retval = portage.process.spawn(
12899                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12900                                 dosyncuri], env=settings.environ())
12901                         if retval != os.EX_OK:
12902                                 print red(" * ")+bold("spawn of " + portage.USER_CONFIG_PATH + "/bin/post_sync failed")
12903
12904         if (mybestpv != mypvs) and "--quiet" not in myopts:
12905                 print
12906                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12907                 print red(" * ")+"that you update portage now, before any other packages are updated."
12908                 print
12909                 print red(" * ")+"To update portage, run 'emerge portage' now."
12910                 print
12911         
12912         display_news_notification(root_config, myopts)
12913         return os.EX_OK
12914
12915 def git_sync_timestamps(settings, portdir):
12916         """
12917         Since git doesn't preserve timestamps, synchronize timestamps between
12918         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12919         for a given file as long as the file in the working tree is not modified
12920         (relative to HEAD).
12921         """
12922         cache_dir = os.path.join(portdir, "metadata", "cache")
12923         if not os.path.isdir(cache_dir):
12924                 return os.EX_OK
12925         writemsg_level(">>> Synchronizing timestamps...\n")
12926
12927         from portage.cache.cache_errors import CacheError
12928         try:
12929                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12930                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12931         except CacheError, e:
12932                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12933                         level=logging.ERROR, noiselevel=-1)
12934                 return 1
12935
12936         ec_dir = os.path.join(portdir, "eclass")
12937         try:
12938                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12939                         if f.endswith(".eclass"))
12940         except OSError, e:
12941                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12942                         level=logging.ERROR, noiselevel=-1)
12943                 return 1
12944
12945         args = [portage.const.BASH_BINARY, "-c",
12946                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12947                 portage._shell_quote(portdir)]
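              # Ask git which files are modified in the working tree relative to
              # HEAD; cache timestamps are only trusted for unmodified files.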
12948         import subprocess
12949         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12950         modified_files = set(l.rstrip("\n") for l in proc.stdout)
12951         rval = proc.wait()
12952         if rval != os.EX_OK:
12953                 return rval
12954
12955         modified_eclasses = set(ec for ec in ec_names \
12956                 if os.path.join("eclass", ec + ".eclass") in modified_files)
12957
12958         updated_ec_mtimes = {}
12959
12960         for cpv in cache_db:
12961                 cpv_split = portage.catpkgsplit(cpv)
12962                 if cpv_split is None:
12963                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12964                                 level=logging.ERROR, noiselevel=-1)
12965                         continue
12966
12967                 cat, pn, ver, rev = cpv_split
12968                 cat, pf = portage.catsplit(cpv)
12969                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
12970                 if relative_eb_path in modified_files:
12971                         continue
12972
12973                 try:
12974                         cache_entry = cache_db[cpv]
12975                         eb_mtime = cache_entry.get("_mtime_")
12976                         ec_mtimes = cache_entry.get("_eclasses_")
12977                 except KeyError:
12978                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12979                                 level=logging.ERROR, noiselevel=-1)
12980                         continue
12981                 except CacheError, e:
12982                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12983                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
12984                         continue
12985
12986                 if eb_mtime is None:
12987                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12988                                 level=logging.ERROR, noiselevel=-1)
12989                         continue
12990
12991                 try:
12992                         eb_mtime = long(eb_mtime)
12993                 except ValueError:
12994                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12995                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12996                         continue
12997
12998                 if ec_mtimes is None:
12999                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13000                                 level=logging.ERROR, noiselevel=-1)
13001                         continue
13002
13003                 if modified_eclasses.intersection(ec_mtimes):
13004                         continue
13005
13006                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13007                 if missing_eclasses:
13008                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13009                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13010                                 noiselevel=-1)
13011                         continue
13012
13013                 eb_path = os.path.join(portdir, relative_eb_path)
13014                 try:
13015                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13016                 except OSError:
13017                         writemsg_level("!!! Missing ebuild: %s\n" % \
13018                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13019                         continue
13020
13021                 inconsistent = False
13022                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13023                         updated_mtime = updated_ec_mtimes.get(ec)
13024                         if updated_mtime is not None and updated_mtime != ec_mtime:
13025                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13026                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13027                                 inconsistent = True
13028                                 break
13029
13030                 if inconsistent:
13031                         continue
13032
13033                 if current_eb_mtime != eb_mtime:
13034                         os.utime(eb_path, (eb_mtime, eb_mtime))
13035
13036                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13037                         if ec in updated_ec_mtimes:
13038                                 continue
13039                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13040                         current_mtime = long(os.stat(ec_path).st_mtime)
13041                         if current_mtime != ec_mtime:
13042                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13043                         updated_ec_mtimes[ec] = ec_mtime
13044
13045         return os.EX_OK
13046
13047 def action_metadata(settings, portdb, myopts):
13048         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13049         old_umask = os.umask(0002)
13050         cachedir = os.path.normpath(settings.depcachedir)
13051         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13052                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13053                                         "/sys", "/tmp", "/usr",  "/var"]:
13054                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13055                         "ROOT DIRECTORY ON YOUR SYSTEM."
13056                 print >> sys.stderr, \
13057                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13058                 sys.exit(73)
13059         if not os.path.exists(cachedir):
13060                 os.mkdir(cachedir)
13061
13062         ec = portage.eclass_cache.cache(portdb.porttree_root)
13063         myportdir = os.path.realpath(settings["PORTDIR"])
13064         cm = settings.load_best_module("portdbapi.metadbmodule")(
13065                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13066
13067         from portage.cache import util
13068
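              # Wraps quiet_mirroring so that a rough percentage of progress is
              # printed while the cache is mirrored, updating roughly once per
              # one percent of the cp_all() list.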
13069         class percentage_noise_maker(util.quiet_mirroring):
13070                 def __init__(self, dbapi):
13071                         self.dbapi = dbapi
13072                         self.cp_all = dbapi.cp_all()
13073                         l = len(self.cp_all)
13074                         self.call_update_min = 100000000
13075                         self.min_cp_all = l/100.0
13076                         self.count = 1
13077                         self.pstr = ''
13078
13079                 def __iter__(self):
13080                         for x in self.cp_all:
13081                                 self.count += 1
13082                                 if self.count > self.min_cp_all:
13083                                         self.call_update_min = 0
13084                                         self.count = 0
13085                                 for y in self.dbapi.cp_list(x):
13086                                         yield y
13087                         self.call_update_min = 0
13088
13089                 def update(self, *arg):
13090                         try:                            self.pstr = int(self.pstr) + 1
13091                         except ValueError:      self.pstr = 1
13092                         sys.stdout.write("%s%i%%" % \
13093                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13094                         sys.stdout.flush()
13095                         self.call_update_min = 10000000
13096
13097                 def finish(self, *arg):
13098                         sys.stdout.write("\b\b\b\b100%\n")
13099                         sys.stdout.flush()
13100
13101         if "--quiet" in myopts:
13102                 def quicky_cpv_generator(cp_all_list):
13103                         for x in cp_all_list:
13104                                 for y in portdb.cp_list(x):
13105                                         yield y
13106                 source = quicky_cpv_generator(portdb.cp_all())
13107                 noise_maker = portage.cache.util.quiet_mirroring()
13108         else:
13109                 noise_maker = source = percentage_noise_maker(portdb)
13110         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13111                 eclass_cache=ec, verbose_instance=noise_maker)
13112
13113         sys.stdout.flush()
13114         os.umask(old_umask)
13115
13116 def action_regen(settings, portdb, max_jobs, max_load):
13117         xterm_titles = "notitles" not in settings.features
13118         emergelog(xterm_titles, " === regen")
13119         #regenerate cache entries
13120         portage.writemsg_stdout("Regenerating cache entries...\n")
13121         try:
13122                 os.close(sys.stdin.fileno())
13123         except SystemExit, e:
13124                 raise # Needed else can't exit
13125         except:
13126                 pass
13127         sys.stdout.flush()
13128
13129         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13130         regen.run()
13131
13132         portage.writemsg_stdout("done!\n")
13133         return regen.returncode
13134
13135 def action_config(settings, trees, myopts, myfiles):
13136         if len(myfiles) != 1:
13137                 print red("!!! config can only take a single package atom at this time\n")
13138                 sys.exit(1)
13139         if not is_valid_package_atom(myfiles[0]):
13140                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13141                         noiselevel=-1)
13142                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13143                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13144                 sys.exit(1)
13145         print
13146         try:
13147                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13148         except portage.exception.AmbiguousPackageName, e:
13149                 # Multiple matches thrown from cpv_expand
13150                 pkgs = e.args[0]
13151         if len(pkgs) == 0:
13152                 print "No packages found.\n"
13153                 sys.exit(0)
13154         elif len(pkgs) > 1:
13155                 if "--ask" in myopts:
13156                         options = []
13157                         print "Please select a package to configure:"
13158                         idx = 0
13159                         for pkg in pkgs:
13160                                 idx += 1
13161                                 options.append(str(idx))
13162                                 print options[-1]+") "+pkg
13163                         print "X) Cancel"
13164                         options.append("X")
13165                         idx = userquery("Selection?", options)
13166                         if idx == "X":
13167                                 sys.exit(0)
13168                         pkg = pkgs[int(idx)-1]
13169                 else:
13170                         print "The following packages are available:"
13171                         for pkg in pkgs:
13172                                 print "* "+pkg
13173                         print "\nPlease use a specific atom or the --ask option."
13174                         sys.exit(1)
13175         else:
13176                 pkg = pkgs[0]
13177
13178         print
13179         if "--ask" in myopts:
13180                 if userquery("Ready to configure "+pkg+"?") == "No":
13181                         sys.exit(0)
13182         else:
13183                 print "Configuring %s..." % pkg
13184         print
13185         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13186         mysettings = portage.config(clone=settings)
13187         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13188         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13189         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13190                 mysettings,
13191                 debug=debug, cleanup=True,
13192                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13193         if retval == os.EX_OK:
13194                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13195                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13196         print
13197
13198 def action_info(settings, trees, myopts, myfiles):
13199         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13200                 settings.profile_path, settings["CHOST"],
13201                 trees[settings["ROOT"]]["vartree"].dbapi)
13202         header_width = 65
13203         header_title = "System Settings"
13204         if myfiles:
13205                 print header_width * "="
13206                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13207         print header_width * "="
13208         print "System uname: "+platform.platform(aliased=1)
13209
13210         lastSync = portage.grabfile(os.path.join(
13211                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13212         print "Timestamp of tree:",
13213         if lastSync:
13214                 print lastSync[0]
13215         else:
13216                 print "Unknown"
13217
13218         output=commands.getstatusoutput("distcc --version")
13219         if not output[0]:
13220                 print str(output[1].split("\n",1)[0]),
13221                 if "distcc" in settings.features:
13222                         print "[enabled]"
13223                 else:
13224                         print "[disabled]"
13225
13226         output=commands.getstatusoutput("ccache -V")
13227         if not output[0]:
13228                 print str(output[1].split("\n",1)[0]),
13229                 if "ccache" in settings.features:
13230                         print "[enabled]"
13231                 else:
13232                         print "[disabled]"
13233
13234         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13235                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13236         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13237         myvars  = portage.util.unique_array(myvars)
13238         myvars.sort()
13239
13240         for x in myvars:
13241                 if portage.isvalidatom(x):
13242                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13243                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13244                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13245                         pkgs = []
13246                         for pn, ver, rev in pkg_matches:
13247                                 if rev != "r0":
13248                                         pkgs.append(ver + "-" + rev)
13249                                 else:
13250                                         pkgs.append(ver)
13251                         if pkgs:
13252                                 pkgs = ", ".join(pkgs)
13253                                 print "%-20s %s" % (x+":", pkgs)
13254                 else:
13255                         print "%-20s %s" % (x+":", "[NOT VALID]")
13256
13257         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13258
13259         if "--verbose" in myopts:
13260                 myvars=settings.keys()
13261         else:
13262                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13263                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13264                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13265                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13266
13267                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13268
13269         myvars = portage.util.unique_array(myvars)
13270         unset_vars = []
13271         myvars.sort()
13272         for x in myvars:
13273                 if x in settings:
13274                         if x != "USE":
13275                                 print '%s="%s"' % (x, settings[x])
13276                         else:
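                                      # Flags that belong to USE_EXPAND variables (for example
                                      # "video_cards_*") are stripped from the plain USE display
                                      # and printed under their own variable names below.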
13277                                 use = set(settings["USE"].split())
13278                                 use_expand = settings["USE_EXPAND"].split()
13279                                 use_expand.sort()
13280                                 for varname in use_expand:
13281                                         flag_prefix = varname.lower() + "_"
13282                                         for f in list(use):
13283                                                 if f.startswith(flag_prefix):
13284                                                         use.remove(f)
13285                                 use = list(use)
13286                                 use.sort()
13287                                 print 'USE="%s"' % " ".join(use),
13288                                 for varname in use_expand:
13289                                         myval = settings.get(varname)
13290                                         if myval:
13291                                                 print '%s="%s"' % (varname, myval),
13292                                 print
13293                 else:
13294                         unset_vars.append(x)
13295         if unset_vars:
13296                 print "Unset:  "+", ".join(unset_vars)
13297         print
13298
13299         if "--debug" in myopts:
13300                 for x in dir(portage):
13301                         module = getattr(portage, x)
13302                         if "cvs_id_string" in dir(module):
13303                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13304
13305         # See if we can find any packages installed matching the strings
13306         # passed on the command line
13307         mypkgs = []
13308         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13309         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13310         for x in myfiles:
13311                 mypkgs.extend(vardb.match(x))
13312
13313         # If some packages were found...
13314         if mypkgs:
13315                 # Get our global settings (we only print stuff if it varies from
13316                 # the current config)
13317                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13318                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13319                 global_vals = {}
13320                 pkgsettings = portage.config(clone=settings)
13321
13322                 for myvar in mydesiredvars:
13323                         global_vals[myvar] = set(settings.get(myvar, "").split())
13324
13325                 # Loop through each package
13326                 # Only print settings if they differ from global settings
13327                 header_title = "Package Settings"
13328                 print header_width * "="
13329                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13330                 print header_width * "="
13331                 from portage.output import EOutput
13332                 out = EOutput()
13333                 for pkg in mypkgs:
13334                         # Get all package specific variables
13335                         auxvalues = vardb.aux_get(pkg, auxkeys)
13336                         valuesmap = {}
13337                         for i in xrange(len(auxkeys)):
13338                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13339                         diff_values = {}
13340                         for myvar in mydesiredvars:
13341                                 # If the package variable doesn't match the
13342                                 # current global variable, something has changed,
13343                                 # so record it in diff_values so we know to print.
13344                                 if valuesmap[myvar] != global_vals[myvar]:
13345                                         diff_values[myvar] = valuesmap[myvar]
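                              # Strip the IUSE default markers ("+flag"/"-flag") and limit the
                              # recorded USE to flags the package actually declares in IUSE, so
                              # the comparison below only considers meaningful flags.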
13346                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13347                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13348                         pkgsettings.reset()
13349                         # If a matching ebuild is no longer available in the tree, maybe it
13350                         # would make sense to compare against the flags for the best
13351                         # available version with the same slot?
13352                         mydb = None
13353                         if portdb.cpv_exists(pkg):
13354                                 mydb = portdb
13355                         pkgsettings.setcpv(pkg, mydb=mydb)
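                              # setcpv() computes the USE flags the package would be built with
                              # now (PORTAGE_USE); if that differs from the USE recorded at
                              # build time, report the recorded value below.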
13356                         if valuesmap["IUSE"].intersection(
13357                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13358                                 diff_values["USE"] = valuesmap["USE"]
13359                         # If a difference was found, print the info for
13360                         # this package.
13361                         if diff_values:
13362                                 # Print package info
13363                                 print "%s was built with the following:" % pkg
13364                                 for myvar in mydesiredvars + ["USE"]:
13365                                         if myvar in diff_values:
13366                                                 mylist = list(diff_values[myvar])
13367                                                 mylist.sort()
13368                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13369                                 print
13370                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13371                         ebuildpath = vardb.findname(pkg)
13372                         if not ebuildpath or not os.path.exists(ebuildpath):
13373                                 out.ewarn("No ebuild found for '%s'" % pkg)
13374                                 continue
13375                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13376                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13377                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13378                                 tree="vartree")
13379
13380 def action_search(root_config, myopts, myfiles, spinner):
13381         if not myfiles:
13382                 print "emerge: no search terms provided."
13383         else:
13384                 searchinstance = search(root_config,
13385                         spinner, "--searchdesc" in myopts,
13386                         "--quiet" not in myopts, "--usepkg" in myopts,
13387                         "--usepkgonly" in myopts)
13388                 for mysearch in myfiles:
13389                         try:
13390                                 searchinstance.execute(mysearch)
13391                         except re.error, comment:
13392                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13393                                 sys.exit(1)
13394                         searchinstance.output()
13395
13396 def action_depclean(settings, trees, ldpath_mtimes,
13397         myopts, action, myfiles, spinner):
13398         # Remove packages that aren't explicitly merged and aren't required as a
13399         # dependency of another package. The world file counts as explicit.
13400
13401         # Global depclean or prune operations are not very safe when there are
13402         # missing dependencies since it's unknown how badly incomplete
13403         # the dependency graph is, and we might accidentally remove packages
13404         # that should have been pulled into the graph. On the other hand, it's
13405         # relatively safe to ignore missing deps when only asked to remove
13406         # specific packages.
13407         allow_missing_deps = len(myfiles) > 0
13408
13409         msg = []
13410         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13411         msg.append("mistakes. Packages that are part of the world set will always\n")
13412         msg.append("be kept.  They can be manually added to this set with\n")
13413         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13414         msg.append("package.provided (see portage(5)) will be removed by\n")
13415         msg.append("depclean, even if they are part of the world set.\n")
13416         msg.append("\n")
13417         msg.append("As a safety measure, depclean will not remove any packages\n")
13418         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13419         msg.append("consequence, it is often necessary to run %s\n" % \
13420                 good("`emerge --update"))
13421         msg.append(good("--newuse --deep @system @world`") + \
13422                 " prior to depclean.\n")
13423
13424         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13425                 portage.writemsg_stdout("\n")
13426                 for x in msg:
13427                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13428
13429         xterm_titles = "notitles" not in settings.features
13430         myroot = settings["ROOT"]
13431         root_config = trees[myroot]["root_config"]
13432         getSetAtoms = root_config.setconfig.getSetAtoms
13433         vardb = trees[myroot]["vartree"].dbapi
13434
13435         required_set_names = ("system", "world")
13436         required_sets = {}
13437         set_args = []
13438
13439         for s in required_set_names:
13440                 required_sets[s] = InternalPackageSet(
13441                         initial_atoms=getSetAtoms(s))
13442
13443
13444         # When removing packages, use a temporary version of world
13445         # which excludes packages that are intended to be eligible for
13446         # removal.
13447         world_temp_set = required_sets["world"]
13448         system_set = required_sets["system"]
13449
13450         if not system_set or not world_temp_set:
13451
13452                 if not system_set:
13453                         writemsg_level("!!! You have no system list.\n",
13454                                 level=logging.ERROR, noiselevel=-1)
13455
13456                 if not world_temp_set:
13457                         writemsg_level("!!! You have no world file.\n",
13458                                         level=logging.WARNING, noiselevel=-1)
13459
13460                 writemsg_level("!!! Proceeding is likely to " + \
13461                         "break your installation.\n",
13462                         level=logging.WARNING, noiselevel=-1)
13463                 if "--pretend" not in myopts:
13464                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13465
13466         if action == "depclean":
13467                 emergelog(xterm_titles, " >>> depclean")
13468
13469         import textwrap
13470         args_set = InternalPackageSet()
13471         if myfiles:
13472                 for x in myfiles:
13473                         if not is_valid_package_atom(x):
13474                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13475                                         level=logging.ERROR, noiselevel=-1)
13476                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13477                                 return
13478                         try:
13479                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13480                         except portage.exception.AmbiguousPackageName, e:
13481                                 msg = "The short ebuild name \"" + x + \
13482                                         "\" is ambiguous.  Please specify " + \
13483                                         "one of the following " + \
13484                                         "fully-qualified ebuild names instead:"
13485                                 for line in textwrap.wrap(msg, 70):
13486                                         writemsg_level("!!! %s\n" % (line,),
13487                                                 level=logging.ERROR, noiselevel=-1)
13488                                 for i in e[0]:
13489                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13490                                                 level=logging.ERROR, noiselevel=-1)
13491                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13492                                 return
13493                         args_set.add(atom)
13494                 matched_packages = False
13495                 for x in args_set:
13496                         if vardb.match(x):
13497                                 matched_packages = True
13498                                 break
13499                 if not matched_packages:
13500                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13501                                 action)
13502                         return
13503
13504         writemsg_level("\nCalculating dependencies  ")
13505         resolver_params = create_depgraph_params(myopts, "remove")
13506         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13507         vardb = resolver.trees[myroot]["vartree"].dbapi
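              # Rebind vardb to the resolver's copy of the vartree dbapi so that
              # iterating it below yields Package instances consistent with the
              # nodes in the resolver's dependency graph.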
13508
13509         if action == "depclean":
13510
13511                 if args_set:
13512                         # Pull in everything that's installed but not matched
13513                         # by an argument atom since we don't want to clean any
13514                         # package if something depends on it.
13515
13516                         world_temp_set.clear()
13517                         for pkg in vardb:
13518                                 spinner.update()
13519
13520                                 try:
13521                                         if args_set.findAtomForPackage(pkg) is None:
13522                                                 world_temp_set.add("=" + pkg.cpv)
13523                                                 continue
13524                                 except portage.exception.InvalidDependString, e:
13525                                         show_invalid_depstring_notice(pkg,
13526                                                 pkg.metadata["PROVIDE"], str(e))
13527                                         del e
13528                                         world_temp_set.add("=" + pkg.cpv)
13529                                         continue
13530
13531         elif action == "prune":
13532
13533                 # Pull in everything that's installed since we don't want
13534                 # to prune a package if something depends on it.
13535                 world_temp_set.clear()
13536                 world_temp_set.update(vardb.cp_all())
13537
13538                 if not args_set:
13539
13540                         # Try to prune everything that's slotted.
13541                         for cp in vardb.cp_all():
13542                                 if len(vardb.cp_list(cp)) > 1:
13543                                         args_set.add(cp)
13544
13545                 # Add version-specific atoms to the temporary world set so that the
13546                 # highest installed version of each package is always protected, as
13547                 # is any installed package that is not matched by an argument atom.
13548                 for pkg in vardb:
13549                         spinner.update()
13550                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13551                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13552                                 raise AssertionError("package expected in matches: " + \
13553                                         "cp = %s, cpv = %s matches = %s" % \
13554                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13555
13556                         highest_version = pkgs_for_cp[-1]
13557                         if pkg == highest_version:
13558                                 # pkg is the highest version
13559                                 world_temp_set.add("=" + pkg.cpv)
13560                                 continue
13561
13562                         if len(pkgs_for_cp) <= 1:
13563                                 raise AssertionError("more packages expected: " + \
13564                                         "cp = %s, cpv = %s matches = %s" % \
13565                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13566
13567                         try:
13568                                 if args_set.findAtomForPackage(pkg) is None:
13569                                         world_temp_set.add("=" + pkg.cpv)
13570                                         continue
13571                         except portage.exception.InvalidDependString, e:
13572                                 show_invalid_depstring_notice(pkg,
13573                                         pkg.metadata["PROVIDE"], str(e))
13574                                 del e
13575                                 world_temp_set.add("=" + pkg.cpv)
13576                                 continue
13577
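              # Seed the resolver with every atom from the system and world sets;
              # _complete_graph() below then resolves those atoms and the deep
              # dependencies of installed packages, so anything still required
              # ends up in the graph and is protected from removal.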
13578         set_args = {}
13579         for s, package_set in required_sets.iteritems():
13580                 set_atom = SETPREFIX + s
13581                 set_arg = SetArg(arg=set_atom, set=package_set,
13582                         root_config=resolver.roots[myroot])
13583                 set_args[s] = set_arg
13584                 for atom in set_arg.set:
13585                         resolver._dep_stack.append(
13586                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13587                         resolver.digraph.add(set_arg, None)
13588
13589         success = resolver._complete_graph()
13590         writemsg_level("\b\b... done!\n")
13591
13592         resolver.display_problems()
13593
13594         if not success:
13595                 return 1
13596
13597         def unresolved_deps():
13598
13599                 unresolvable = set()
13600                 for dep in resolver._initially_unsatisfied_deps:
13601                         if isinstance(dep.parent, Package) and \
13602                                 (dep.priority > UnmergeDepPriority.SOFT):
13603                                 unresolvable.add((dep.atom, dep.parent.cpv))
13604
13605                 if not unresolvable:
13606                         return False
13607
13608                 if unresolvable and not allow_missing_deps:
13609                         prefix = bad(" * ")
13610                         msg = []
13611                         msg.append("Dependencies could not be completely resolved due to")
13612                         msg.append("the following required packages not being installed:")
13613                         msg.append("")
13614                         for atom, parent in unresolvable:
13615                                 msg.append("  %s pulled in by:" % (atom,))
13616                                 msg.append("    %s" % (parent,))
13617                                 msg.append("")
13618                         msg.append("Have you forgotten to run " + \
13619                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13620                         msg.append(("to %s? It may be necessary to manually " + \
13621                                 "uninstall packages that no longer") % action)
13622                         msg.append("exist in the portage tree since " + \
13623                                 "it may not be possible to satisfy their")
13624                         msg.append("dependencies.  Also, be aware of " + \
13625                                 "the --with-bdeps option that is documented")
13626                         msg.append("in " + good("`man emerge`") + ".")
13627                         if action == "prune":
13628                                 msg.append("")
13629                                 msg.append("If you would like to ignore " + \
13630                                         "dependencies then use %s." % good("--nodeps"))
13631                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13632                                 level=logging.ERROR, noiselevel=-1)
13633                         return True
13634                 return False
13635
13636         if unresolved_deps():
13637                 return 1
13638
13639         graph = resolver.digraph.copy()
13640         required_pkgs_total = 0
13641         for node in graph:
13642                 if isinstance(node, Package):
13643                         required_pkgs_total += 1
13644
13645         def show_parents(child_node):
13646                 parent_nodes = graph.parent_nodes(child_node)
13647                 if not parent_nodes:
13648                         # With --prune, the highest version can be pulled in without any
13649                         # real parent since all installed packages are pulled in.  In that
13650                         # case there's nothing to show here.
13651                         return
13652                 parent_strs = []
13653                 for node in parent_nodes:
13654                         parent_strs.append(str(getattr(node, "cpv", node)))
13655                 parent_strs.sort()
13656                 msg = []
13657                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13658                 for parent_str in parent_strs:
13659                         msg.append("    %s\n" % (parent_str,))
13660                 msg.append("\n")
13661                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13662
13663         def cmp_pkg_cpv(pkg1, pkg2):
13664                 """Sort Package instances by cpv."""
13665                 if pkg1.cpv > pkg2.cpv:
13666                         return 1
13667                 elif pkg1.cpv == pkg2.cpv:
13668                         return 0
13669                 else:
13670                         return -1
13671
13672         def create_cleanlist():
13673                 pkgs_to_remove = []
13674
13675                 if action == "depclean":
13676                         if args_set:
13677
13678                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13679                                         arg_atom = None
13680                                         try:
13681                                                 arg_atom = args_set.findAtomForPackage(pkg)
13682                                         except portage.exception.InvalidDependString:
13683                                                 # this error has already been displayed by now
13684                                                 continue
13685
13686                                         if arg_atom:
13687                                                 if pkg not in graph:
13688                                                         pkgs_to_remove.append(pkg)
13689                                                 elif "--verbose" in myopts:
13690                                                         show_parents(pkg)
13691
13692                         else:
13693                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13694                                         if pkg not in graph:
13695                                                 pkgs_to_remove.append(pkg)
13696                                         elif "--verbose" in myopts:
13697                                                 show_parents(pkg)
13698
13699                 elif action == "prune":
13700                         # Prune really uses all installed packages instead of world. The world
13701                         # set arg is not a real reverse dependency, so don't display it as one.
13702                         graph.remove(set_args["world"])
13703
13704                         for atom in args_set:
13705                                 for pkg in vardb.match_pkgs(atom):
13706                                         if pkg not in graph:
13707                                                 pkgs_to_remove.append(pkg)
13708                                         elif "--verbose" in myopts:
13709                                                 show_parents(pkg)
13710
13711                 if not pkgs_to_remove:
13712                         writemsg_level(
13713                                 ">>> No packages selected for removal by %s\n" % action)
13714                         if "--verbose" not in myopts:
13715                                 writemsg_level(
13716                                         ">>> To see reverse dependencies, use %s\n" % \
13717                                                 good("--verbose"))
13718                         if action == "prune":
13719                                 writemsg_level(
13720                                         ">>> To ignore dependencies, use %s\n" % \
13721                                                 good("--nodeps"))
13722
13723                 return pkgs_to_remove
13724
13725         cleanlist = create_cleanlist()
13726
13727         if len(cleanlist):
13728                 clean_set = set(cleanlist)
13729
13730                 # Check if any of these packages are the sole providers of libraries
13731                 # with consumers that have not been selected for removal. If so, these
13732                 # packages and any dependencies need to be added to the graph.
13733                 real_vardb = trees[myroot]["vartree"].dbapi
13734                 linkmap = real_vardb.linkmap
13735                 liblist = linkmap.listLibraryObjects()
13736                 consumer_cache = {}
13737                 provider_cache = {}
13738                 soname_cache = {}
13739                 consumer_map = {}
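                      # consumer_cache: library path -> files that link against it
                      # provider_cache: consumer file -> {soname: provider paths}
                      # consumer_map:   package -> {library: consumers that may break}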
13740
13741                 writemsg_level(">>> Checking for lib consumers...\n")
13742
13743                 for pkg in cleanlist:
13744                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13745                         provided_libs = set()
13746
13747                         for lib in liblist:
13748                                 if pkg_dblink.isowner(lib, myroot):
13749                                         provided_libs.add(lib)
13750
13751                         if not provided_libs:
13752                                 continue
13753
13754                         consumers = {}
13755                         for lib in provided_libs:
13756                                 lib_consumers = consumer_cache.get(lib)
13757                                 if lib_consumers is None:
13758                                         lib_consumers = linkmap.findConsumers(lib)
13759                                         consumer_cache[lib] = lib_consumers
13760                                 if lib_consumers:
13761                                         consumers[lib] = lib_consumers
13762
13763                         if not consumers:
13764                                 continue
13765
13766                         for lib, lib_consumers in consumers.items():
13767                                 for consumer_file in list(lib_consumers):
13768                                         if pkg_dblink.isowner(consumer_file, myroot):
13769                                                 lib_consumers.remove(consumer_file)
13770                                 if not lib_consumers:
13771                                         del consumers[lib]
13772
13773                         if not consumers:
13774                                 continue
13775
13776                         for lib, lib_consumers in consumers.iteritems():
13777
13778                                 soname = soname_cache.get(lib)
13779                                 if soname is None:
13780                                         soname = linkmap.getSoname(lib)
13781                                         soname_cache[lib] = soname
13782
13783                                 consumer_providers = []
13784                                 for lib_consumer in lib_consumers:
13785                                         providers = provider_cache.get(lib_consumer)
13786                                         if providers is None:
13787                                                 providers = linkmap.findProviders(lib_consumer)
13788                                                 provider_cache[lib_consumer] = providers
13789                                         if soname not in providers:
13790                                                 # Why does this happen?
13791                                                 continue
13792                                         consumer_providers.append(
13793                                                 (lib_consumer, providers[soname]))
13794
13795                                 consumers[lib] = consumer_providers
13796
13797                         consumer_map[pkg] = consumers
13798
13799                 if consumer_map:
13800
13801                         search_files = set()
13802                         for consumers in consumer_map.itervalues():
13803                                 for lib, consumer_providers in consumers.iteritems():
13804                                         for lib_consumer, providers in consumer_providers:
13805                                                 search_files.add(lib_consumer)
13806                                                 search_files.update(providers)
13807
13808                         writemsg_level(">>> Assigning files to packages...\n")
13809                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
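                              # file_owners maps each file collected above to the set of
                              # installed-package dblinks that own it, which is used below to
                              # translate consumer and provider files back into packages.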
13810
13811                         for pkg, consumers in consumer_map.items():
13812                                 for lib, consumer_providers in consumers.items():
13813                                         lib_consumers = set()
13814
13815                                         for lib_consumer, providers in consumer_providers:
13816                                                 owner_set = file_owners.get(lib_consumer)
13817                                                 provider_dblinks = set()
13818                                                 provider_pkgs = set()
13819
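                                                      # If the library has more than one provider and at
                                                      # least one provider package is not scheduled for
                                                      # removal, this consumer keeps a working provider
                                                      # and does not need to be reported.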
13820                                                 if len(providers) > 1:
13821                                                         for provider in providers:
13822                                                                 provider_set = file_owners.get(provider)
13823                                                                 if provider_set is not None:
13824                                                                         provider_dblinks.update(provider_set)
13825
13826                                                 if len(provider_dblinks) > 1:
13827                                                         for provider_dblink in provider_dblinks:
13828                                                                 pkg_key = ("installed", myroot,
13829                                                                         provider_dblink.mycpv, "nomerge")
13830                                                                 if pkg_key not in clean_set:
13831                                                                         provider_pkgs.add(vardb.get(pkg_key))
13832
13833                                                 if provider_pkgs:
13834                                                         continue
13835
13836                                                 if owner_set is not None:
13837                                                         lib_consumers.update(owner_set)
13838
13839                                         for consumer_dblink in list(lib_consumers):
13840                                                 if ("installed", myroot, consumer_dblink.mycpv,
13841                                                         "nomerge") in clean_set:
13842                                                         lib_consumers.remove(consumer_dblink)
13843                                                         continue
13844
13845                                         if lib_consumers:
13846                                                 consumers[lib] = lib_consumers
13847                                         else:
13848                                                 del consumers[lib]
13849                                 if not consumers:
13850                                         del consumer_map[pkg]
13851
13852                 if consumer_map:
13853                         # TODO: Implement a package set for rebuilding consumer packages.
13854
13855                         msg = "In order to avoid breakage of link level " + \
13856                                 "dependencies, one or more packages will not be removed. " + \
13857                                 "This can be solved by rebuilding " + \
13858                                 "the packages that pulled them in."
13859
13860                         prefix = bad(" * ")
13861                         from textwrap import wrap
13862                         writemsg_level("".join(prefix + "%s\n" % line for \
13863                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13864
13865                         msg = []
13866                         for pkg, consumers in consumer_map.iteritems():
13867                                 unique_consumers = set(chain(*consumers.values()))
13868                                 unique_consumers = sorted(consumer.mycpv \
13869                                         for consumer in unique_consumers)
13870                                 msg.append("")
13871                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13872                                 for consumer in unique_consumers:
13873                                         msg.append("    %s" % (consumer,))
13874                         msg.append("")
13875                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13876                                 level=logging.WARNING, noiselevel=-1)
13877
13878                         # Add lib providers to the graph as children of lib consumers,
13879                         # and also add any dependencies pulled in by the provider.
13880                         writemsg_level(">>> Adding lib providers to graph...\n")
13881
13882                         for pkg, consumers in consumer_map.iteritems():
13883                                 for consumer_dblink in set(chain(*consumers.values())):
13884                                         consumer_pkg = vardb.get(("installed", myroot,
13885                                                 consumer_dblink.mycpv, "nomerge"))
13886                                         if not resolver._add_pkg(pkg,
13887                                                 Dependency(parent=consumer_pkg,
13888                                                 priority=UnmergeDepPriority(runtime=True),
13889                                                 root=pkg.root)):
13890                                                 resolver.display_problems()
13891                                                 return 1
13892
13893                         writemsg_level("\nCalculating dependencies  ")
13894                         success = resolver._complete_graph()
13895                         writemsg_level("\b\b... done!\n")
13896                         resolver.display_problems()
13897                         if not success:
13898                                 return 1
13899                         if unresolved_deps():
13900                                 return 1
13901
13902                         graph = resolver.digraph.copy()
13903                         required_pkgs_total = 0
13904                         for node in graph:
13905                                 if isinstance(node, Package):
13906                                         required_pkgs_total += 1
13907                         cleanlist = create_cleanlist()
13908                         if not cleanlist:
13909                                 return 0
13910                         clean_set = set(cleanlist)
13911
13912                 # Use a topological sort to create an unmerge order such that
13913                 # each package is unmerged before its dependencies. This is
13914                 # necessary to avoid breaking things that may need to run
13915                 # during pkg_prerm or pkg_postrm phases.
13916
13917                 # Create a new graph to account for dependencies between the
13918                 # packages being unmerged.
13919                 graph = digraph()
13920                 del cleanlist[:]
13921
13922                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13923                 runtime = UnmergeDepPriority(runtime=True)
13924                 runtime_post = UnmergeDepPriority(runtime_post=True)
13925                 buildtime = UnmergeDepPriority(buildtime=True)
13926                 priority_map = {
13927                         "RDEPEND": runtime,
13928                         "PDEPEND": runtime_post,
13929                         "DEPEND": buildtime,
13930                 }
13931
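                      # For each package being removed, add edges to the other
                      # to-be-removed packages it depends on, so the topological
                      # pop below unmerges packages before their dependencies.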
13932                 for node in clean_set:
13933                         graph.add(node, None)
13934                         mydeps = []
13935                         node_use = node.metadata["USE"].split()
13936                         for dep_type in dep_keys:
13937                                 depstr = node.metadata[dep_type]
13938                                 if not depstr:
13939                                         continue
13940                                 try:
13941                                         portage.dep._dep_check_strict = False
13942                                         success, atoms = portage.dep_check(depstr, None, settings,
13943                                                 myuse=node_use, trees=resolver._graph_trees,
13944                                                 myroot=myroot)
13945                                 finally:
13946                                         portage.dep._dep_check_strict = True
13947                                 if not success:
13948                                         # Ignore invalid deps of packages that will
13949                                         # be uninstalled anyway.
13950                                         continue
13951
13952                                 priority = priority_map[dep_type]
13953                                 for atom in atoms:
13954                                         if not isinstance(atom, portage.dep.Atom):
13955                                                 # Ignore invalid atoms returned from dep_check().
13956                                                 continue
13957                                         if atom.blocker:
13958                                                 continue
13959                                         matches = vardb.match_pkgs(atom)
13960                                         if not matches:
13961                                                 continue
13962                                         for child_node in matches:
13963                                                 if child_node in clean_set:
13964                                                         graph.add(child_node, node, priority=priority)
13965
13966                 ordered = True
13967                 if len(graph.order) == len(graph.root_nodes()):
13968                         # If there are no dependencies between packages
13969                         # let unmerge() group them by cat/pn.
13970                         ordered = False
13971                         cleanlist = [pkg.cpv for pkg in graph.order]
13972                 else:
13973                         # Order nodes from lowest to highest overall reference count for
13974                         # optimal root node selection.
13975                         node_refcounts = {}
13976                         for node in graph.order:
13977                                 node_refcounts[node] = len(graph.parent_nodes(node))
13978                         def cmp_reference_count(node1, node2):
13979                                 return node_refcounts[node1] - node_refcounts[node2]
13980                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
13981
13982                         ignore_priority_range = [None]
13983                         ignore_priority_range.extend(
13984                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13985                         while not graph.empty():
13986                                 for ignore_priority in ignore_priority_range:
13987                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
13988                                         if nodes:
13989                                                 break
13990                                 if not nodes:
13991                                         raise AssertionError("no root nodes")
13992                                 if ignore_priority is not None:
13993                                         # Some deps have been dropped due to circular dependencies,
13994                                         # so only pop one node in order to minimize the number that
13995                                         # are dropped.
13996                                         del nodes[1:]
13997                                 for node in nodes:
13998                                         graph.remove(node)
13999                                         cleanlist.append(node.cpv)
14000
14001                 unmerge(root_config, myopts, "unmerge", cleanlist,
14002                         ldpath_mtimes, ordered=ordered)
14003
14004         if action == "prune":
14005                 return
14006
14007         if not cleanlist and "--quiet" in myopts:
14008                 return
14009
14010         print "Packages installed:   "+str(len(vardb.cpv_all()))
14011         print "Packages in world:    " + \
14012                 str(len(root_config.sets["world"].getAtoms()))
14013         print "Packages in system:   " + \
14014                 str(len(root_config.sets["system"].getAtoms()))
14015         print "Required packages:    "+str(required_pkgs_total)
14016         if "--pretend" in myopts:
14017                 print "Number to remove:     "+str(len(cleanlist))
14018         else:
14019                 print "Number removed:       "+str(len(cleanlist))
14020
14021 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14022         """
14023         Construct a depgraph for the given resume list. This will raise
14024         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14025         @rtype: tuple
14026         @returns: (success, depgraph, dropped_tasks)
14027         """
14028         skip_masked = True
14029         skip_unsatisfied = True
14030         mergelist = mtimedb["resume"]["mergelist"]
14031         dropped_tasks = set()
14032         while True:
14033                 mydepgraph = depgraph(settings, trees,
14034                         myopts, myparams, spinner)
14035                 try:
14036                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14037                                 skip_masked=skip_masked)
14038                 except depgraph.UnsatisfiedResumeDep, e:
14039                         if not skip_unsatisfied:
14040                                 raise
14041
14042                         graph = mydepgraph.digraph
14043                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14044                                 for dep in e.value)
14045                         traversed_nodes = set()
14046                         unsatisfied_stack = list(unsatisfied_parents)
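                              # Walk up the graph from each unsatisfied parent and also mark
                              # any ancestor package whose dependencies would become
                              # unsatisfied once these packages are dropped, so that it can
                              # be pruned from the mergelist as well.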
14047                         while unsatisfied_stack:
14048                                 pkg = unsatisfied_stack.pop()
14049                                 if pkg in traversed_nodes:
14050                                         continue
14051                                 traversed_nodes.add(pkg)
14052
14053                                 # If this package was pulled in by a parent
14054                                 # package scheduled for merge, removing this
14055                                 # package may cause the parent package's
14056                                 # dependency to become unsatisfied.
14057                                 for parent_node in graph.parent_nodes(pkg):
14058                                         if not isinstance(parent_node, Package) \
14059                                                 or parent_node.operation not in ("merge", "nomerge"):
14060                                                 continue
14061                                         unsatisfied = \
14062                                                 graph.child_nodes(parent_node,
14063                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14064                                         if pkg in unsatisfied:
14065                                                 unsatisfied_parents[parent_node] = parent_node
14066                                                 unsatisfied_stack.append(parent_node)
14067
14068                         pruned_mergelist = []
14069                         for x in mergelist:
14070                                 if isinstance(x, list) and \
14071                                         tuple(x) not in unsatisfied_parents:
14072                                         pruned_mergelist.append(x)
14073
14074                         # If the mergelist doesn't shrink then this loop is infinite.
14075                         if len(pruned_mergelist) == len(mergelist):
14076                                 # This happens if a package can't be dropped because
14077                                 # it's already installed, but it has unsatisfied PDEPEND.
14078                                 raise
14079                         mergelist[:] = pruned_mergelist
14080
14081                         # Exclude installed packages that have been removed from the graph due
14082                         # to failure to build/install runtime dependencies after the dependent
14083                         # package has already been installed.
14084                         dropped_tasks.update(pkg for pkg in \
14085                                 unsatisfied_parents if pkg.operation != "nomerge")
14086                         mydepgraph.break_refs(unsatisfied_parents)
14087
14088                         del e, graph, traversed_nodes, \
14089                                 unsatisfied_parents, unsatisfied_stack
14090                         continue
14091                 else:
14092                         break
14093         return (success, mydepgraph, dropped_tasks)
14094
14095 def action_build(settings, trees, mtimedb,
14096         myopts, myaction, myfiles, spinner):
14097
14098         # validate the state of the resume data
14099         # so that we can make assumptions later.
14100         for k in ("resume", "resume_backup"):
14101                 if k not in mtimedb:
14102                         continue
14103                 resume_data = mtimedb[k]
14104                 if not isinstance(resume_data, dict):
14105                         del mtimedb[k]
14106                         continue
14107                 mergelist = resume_data.get("mergelist")
14108                 if not isinstance(mergelist, list):
14109                         del mtimedb[k]
14110                         continue
14111                 for x in mergelist:
14112                         if not (isinstance(x, list) and len(x) == 4):
14113                                 continue
14114                         pkg_type, pkg_root, pkg_key, pkg_action = x
14115                         if pkg_root not in trees:
14116                                 # Current $ROOT setting differs,
14117                                 # so the list must be stale.
14118                                 mergelist = None
14119                                 break
14120                 if not mergelist:
14121                         del mtimedb[k]
14122                         continue
14123                 resume_opts = resume_data.get("myopts")
14124                 if not isinstance(resume_opts, (dict, list)):
14125                         del mtimedb[k]
14126                         continue
14127                 favorites = resume_data.get("favorites")
14128                 if not isinstance(favorites, list):
14129                         del mtimedb[k]
14130                         continue
14131
14132         resume = False
14133         if "--resume" in myopts and \
14134                 ("resume" in mtimedb or
14135                 "resume_backup" in mtimedb):
14136                 resume = True
14137                 if "resume" not in mtimedb:
14138                         mtimedb["resume"] = mtimedb["resume_backup"]
14139                         del mtimedb["resume_backup"]
14140                         mtimedb.commit()
14141                 # "myopts" is a list for backward compatibility.
14142                 resume_opts = mtimedb["resume"].get("myopts", [])
14143                 if isinstance(resume_opts, list):
14144                         resume_opts = dict((k,True) for k in resume_opts)
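                      # These options only make sense for the invocation they were
                      # originally given on, so drop them instead of re-applying
                      # them from the saved resume options.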
14145                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14146                         resume_opts.pop(opt, None)
14147                 myopts.update(resume_opts)
14148
14149                 if "--debug" in myopts:
14150                         writemsg_level("myopts %s\n" % (myopts,))
14151
14152                 # Adjust config according to options of the command being resumed.
14153                 for myroot in trees:
14154                         mysettings =  trees[myroot]["vartree"].settings
14155                         mysettings.unlock()
14156                         adjust_config(myopts, mysettings)
14157                         mysettings.lock()
14158                         del myroot, mysettings
14159
14160         ldpath_mtimes = mtimedb["ldpath"]
14161         favorites=[]
14162         merge_count = 0
14163         buildpkgonly = "--buildpkgonly" in myopts
14164         pretend = "--pretend" in myopts
14165         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14166         ask = "--ask" in myopts
14167         nodeps = "--nodeps" in myopts
14168         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14169         tree = "--tree" in myopts
14170         if nodeps and tree:
14171                 tree = False
14172                 del myopts["--tree"]
14173                 portage.writemsg(colorize("WARN", " * ") + \
14174                         "--tree is broken with --nodeps. Disabling...\n")
14175         debug = "--debug" in myopts
14176         verbose = "--verbose" in myopts
14177         quiet = "--quiet" in myopts
14178         if pretend or fetchonly:
14179                 # make the mtimedb readonly
14180                 mtimedb.filename = None
14181         if "--digest" in myopts:
14182                 msg = "The --digest option can prevent corruption from being" + \
14183                         " noticed. The `repoman manifest` command is the preferred" + \
14184                         " way to generate manifests and it is capable of doing an" + \
14185                         " entire repository or category at once."
14186                 prefix = bad(" * ")
14187                 writemsg(prefix + "\n")
14188                 from textwrap import wrap
14189                 for line in wrap(msg, 72):
14190                         writemsg("%s%s\n" % (prefix, line))
14191                 writemsg(prefix + "\n")
14192
14193         if "--quiet" not in myopts and \
14194                 ("--pretend" in myopts or "--ask" in myopts or \
14195                 "--tree" in myopts or "--verbose" in myopts):
14196                 action = ""
14197                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14198                         action = "fetched"
14199                 elif "--buildpkgonly" in myopts:
14200                         action = "built"
14201                 else:
14202                         action = "merged"
14203                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14204                         print
14205                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14206                         print
14207                 else:
14208                         print
14209                         print darkgreen("These are the packages that would be %s, in order:") % action
14210                         print
14211
14212         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14213         if not show_spinner:
14214                 spinner.update = spinner.update_quiet
14215
14216         if resume:
14217                 favorites = mtimedb["resume"].get("favorites")
14218                 if not isinstance(favorites, list):
14219                         favorites = []
14220
14221                 if show_spinner:
14222                         print "Calculating dependencies  ",
14223                 myparams = create_depgraph_params(myopts, myaction)
14224
14225                 resume_data = mtimedb["resume"]
14226                 mergelist = resume_data["mergelist"]
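                      # --skipfirst drops the first pending merge task from the
                      # resume list; any uninstall tasks ahead of it are kept.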
14227                 if mergelist and "--skipfirst" in myopts:
14228                         for i, task in enumerate(mergelist):
14229                                 if isinstance(task, list) and \
14230                                         task and task[-1] == "merge":
14231                                         del mergelist[i]
14232                                         break
14233
14234                 success = False
14235                 mydepgraph = None
14236                 try:
14237                         success, mydepgraph, dropped_tasks = resume_depgraph(
14238                                 settings, trees, mtimedb, myopts, myparams, spinner)
14239                 except (portage.exception.PackageNotFound,
14240                         depgraph.UnsatisfiedResumeDep), e:
14241                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14242                                 mydepgraph = e.depgraph
14243                         if show_spinner:
14244                                 print
14245                         from textwrap import wrap
14246                         from portage.output import EOutput
14247                         out = EOutput()
14248
14249                         resume_data = mtimedb["resume"]
14250                         mergelist = resume_data.get("mergelist")
14251                         if not isinstance(mergelist, list):
14252                                 mergelist = []
14253                         if mergelist and (debug or (verbose and not quiet)):
14254                                 out.eerror("Invalid resume list:")
14255                                 out.eerror("")
14256                                 indent = "  "
14257                                 for task in mergelist:
14258                                         if isinstance(task, list):
14259                                                 out.eerror(indent + str(tuple(task)))
14260                                 out.eerror("")
14261
14262                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14263                                 out.eerror("One or more packages are either masked or " + \
14264                                         "have missing dependencies:")
14265                                 out.eerror("")
14266                                 indent = "  "
14267                                 for dep in e.value:
14268                                         if dep.atom is None:
14269                                                 out.eerror(indent + "Masked package:")
14270                                                 out.eerror(2 * indent + str(dep.parent))
14271                                                 out.eerror("")
14272                                         else:
14273                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14274                                                 out.eerror(2 * indent + str(dep.parent))
14275                                                 out.eerror("")
14276                                 msg = "The resume list contains packages " + \
14277                                         "that are either masked or have " + \
14278                                         "unsatisfied dependencies. " + \
14279                                         "Please restart/continue " + \
14280                                         "the operation manually, or use --skipfirst " + \
14281                                         "to skip the first package in the list and " + \
14282                                         "any other packages that may be " + \
14283                                         "masked or have missing dependencies."
14284                                 for line in wrap(msg, 72):
14285                                         out.eerror(line)
14286                         elif isinstance(e, portage.exception.PackageNotFound):
14287                                 out.eerror("An expected package is " + \
14288                                         "not available: %s" % str(e))
14289                                 out.eerror("")
14290                                 msg = "The resume list contains one or more " + \
14291                                         "packages that are no longer " + \
14292                                         "available. Please restart/continue " + \
14293                                         "the operation manually."
14294                                 for line in wrap(msg, 72):
14295                                         out.eerror(line)
14296                 else:
14297                         if show_spinner:
14298                                 print "\b\b... done!"
14299
14300                 if success:
14301                         if dropped_tasks:
14302                                 portage.writemsg("!!! One or more packages have been " + \
14303                                         "dropped due to\n" + \
14304                                         "!!! masking or unsatisfied dependencies:\n\n",
14305                                         noiselevel=-1)
14306                                 for task in dropped_tasks:
14307                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14308                                 portage.writemsg("\n", noiselevel=-1)
14309                         del dropped_tasks
14310                 else:
14311                         if mydepgraph is not None:
14312                                 mydepgraph.display_problems()
14313                         if not (ask or pretend):
14314                                 # delete the current list and also the backup
14315                                 # since it's probably stale too.
14316                                 for k in ("resume", "resume_backup"):
14317                                         mtimedb.pop(k, None)
14318                                 mtimedb.commit()
14319
14320                         return 1
14321         else:
14322                 if ("--resume" in myopts):
14323                         print darkgreen("emerge: It seems we have nothing to resume...")
14324                         return os.EX_OK
14325
14326                 myparams = create_depgraph_params(myopts, myaction)
14327                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14328                         print "Calculating dependencies  ",
14329                         sys.stdout.flush()
14330                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14331                 try:
14332                         retval, favorites = mydepgraph.select_files(myfiles)
14333                 except portage.exception.PackageNotFound, e:
14334                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14335                         return 1
14336                 except portage.exception.PackageSetNotFound, e:
14337                         root_config = trees[settings["ROOT"]]["root_config"]
14338                         display_missing_pkg_set(root_config, e.value)
14339                         return 1
14340                 if show_spinner:
14341                         print "\b\b... done!"
14342                 if not retval:
14343                         mydepgraph.display_problems()
14344                         return 1
14345
14346         if "--pretend" not in myopts and \
14347                 ("--ask" in myopts or "--tree" in myopts or \
14348                 "--verbose" in myopts) and \
14349                 not ("--quiet" in myopts and "--ask" not in myopts):
14350                 if "--resume" in myopts:
14351                         mymergelist = mydepgraph.altlist()
14352                         if len(mymergelist) == 0:
14353                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14354                                 return os.EX_OK
14355                         favorites = mtimedb["resume"]["favorites"]
14356                         retval = mydepgraph.display(
14357                                 mydepgraph.altlist(reversed=tree),
14358                                 favorites=favorites)
14359                         mydepgraph.display_problems()
14360                         if retval != os.EX_OK:
14361                                 return retval
14362                         prompt="Would you like to resume merging these packages?"
14363                 else:
14364                         retval = mydepgraph.display(
14365                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14366                                 favorites=favorites)
14367                         mydepgraph.display_problems()
14368                         if retval != os.EX_OK:
14369                                 return retval
14370                         mergecount=0
14371                         for x in mydepgraph.altlist():
14372                                 if isinstance(x, Package) and x.operation == "merge":
14373                                         mergecount += 1
14374
14375                         if mergecount==0:
14376                                 sets = trees[settings["ROOT"]]["root_config"].sets
14377                                 world_candidates = None
14378                                 if "--noreplace" in myopts and \
14379                                         not oneshot and favorites:
14380                                         # Sets that are not world candidates are filtered
14381                                         # out here since the favorites list needs to be
14382                                         # complete for depgraph.loadResumeCommand() to
14383                                         # operate correctly.
14384                                         world_candidates = [x for x in favorites \
14385                                                 if not (x.startswith(SETPREFIX) and \
14386                                                 not sets[x[1:]].world_candidate)]
14387                                 if "--noreplace" in myopts and \
14388                                         not oneshot and world_candidates:
14389                                         print
14390                                         for x in world_candidates:
14391                                                 print " %s %s" % (good("*"), x)
14392                                         prompt="Would you like to add these packages to your world favorites?"
14393                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14394                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14395                                 else:
14396                                         print
14397                                         print "Nothing to merge; quitting."
14398                                         print
14399                                         return os.EX_OK
14400                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14401                                 prompt="Would you like to fetch the source files for these packages?"
14402                         else:
14403                                 prompt="Would you like to merge these packages?"
14404                 print
14405                 if "--ask" in myopts and userquery(prompt) == "No":
14406                         print
14407                         print "Quitting."
14408                         print
14409                         return os.EX_OK
14410                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14411                 myopts.pop("--ask", None)
14412
14413         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14414                 if ("--resume" in myopts):
14415                         mymergelist = mydepgraph.altlist()
14416                         if len(mymergelist) == 0:
14417                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14418                                 return os.EX_OK
14419                         favorites = mtimedb["resume"]["favorites"]
14420                         retval = mydepgraph.display(
14421                                 mydepgraph.altlist(reversed=tree),
14422                                 favorites=favorites)
14423                         mydepgraph.display_problems()
14424                         if retval != os.EX_OK:
14425                                 return retval
14426                 else:
14427                         retval = mydepgraph.display(
14428                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14429                                 favorites=favorites)
14430                         mydepgraph.display_problems()
14431                         if retval != os.EX_OK:
14432                                 return retval
14433                         if "--buildpkgonly" in myopts:
14434                                 graph_copy = mydepgraph.digraph.clone()
14435                                 removed_nodes = set()
14436                                 for node in list(graph_copy.order):
14437                                         if not isinstance(node, Package) or \
14438                                                 node.operation == "nomerge":
14439                                                 removed_nodes.add(node)
14440                                 graph_copy.difference_update(removed_nodes)
14441                                 if not graph_copy.hasallzeros(ignore_priority = \
14442                                         DepPrioritySatisfiedRange.ignore_medium):
14443                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14444                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14445                                         return 1
14446         else:
14447                 if "--buildpkgonly" in myopts:
14448                         graph_copy = mydepgraph.digraph.clone()
14449                         removed_nodes = set()
14450                         for node in list(graph_copy.order):
14451                                 if not isinstance(node, Package) or \
14452                                         node.operation == "nomerge":
14453                                         removed_nodes.add(node)
14454                         graph_copy.difference_update(removed_nodes)
14455                         if not graph_copy.hasallzeros(ignore_priority = \
14456                                 DepPrioritySatisfiedRange.ignore_medium):
14457                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14458                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14459                                 return 1
14460
14461                 if ("--resume" in myopts):
14462                         favorites=mtimedb["resume"]["favorites"]
14463                         mymergelist = mydepgraph.altlist()
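                        # break_refs() plus the del/clear_caches() calls below drop
                        # references to the depgraph so its memory can be reclaimed
                        # before the Scheduler starts merging.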
14464                         mydepgraph.break_refs(mymergelist)
14465                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14466                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14467                         del mydepgraph, mymergelist
14468                         clear_caches(trees)
14469
14470                         retval = mergetask.merge()
14471                         merge_count = mergetask.curval
14472                 else:
14473                         if "resume" in mtimedb and \
14474                         "mergelist" in mtimedb["resume"] and \
14475                         len(mtimedb["resume"]["mergelist"]) > 1:
14476                                 mtimedb["resume_backup"] = mtimedb["resume"]
14477                                 del mtimedb["resume"]
14478                                 mtimedb.commit()
14479                         mtimedb["resume"]={}
14480                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14481                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14482                         # a list type for options.
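                        # Illustrative shapes (assumed): newer versions store a dict
                        # such as {"--deep": True, "--jobs": 4}, whereas the old
                        # format was a plain list of option strings, e.g. ["--deep"].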
14483                         mtimedb["resume"]["myopts"] = myopts.copy()
14484
14485                         # Convert Atom instances to plain str.
14486                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14487
14488                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14489                                 for pkgline in mydepgraph.altlist():
14490                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14491                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14492                                                 tmpsettings = portage.config(clone=settings)
14493                                                 edebug = 0
14494                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
14495                                                         edebug = 1
14496                                                 retval = portage.doebuild(
14497                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
14498                                                         ("--pretend" in myopts),
14499                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14500                                                         tree="porttree")
14501
14502                         pkglist = mydepgraph.altlist()
14503                         mydepgraph.saveNomergeFavorites()
14504                         mydepgraph.break_refs(pkglist)
14505                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14506                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14507                         del mydepgraph, pkglist
14508                         clear_caches(trees)
14509
14510                         retval = mergetask.merge()
14511                         merge_count = mergetask.curval
14512
14513                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14514                         if "yes" == settings.get("AUTOCLEAN"):
14515                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14516                                 unmerge(trees[settings["ROOT"]]["root_config"],
14517                                         myopts, "clean", [],
14518                                         ldpath_mtimes, autoclean=1)
14519                         else:
14520                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14521                                         + " AUTOCLEAN is disabled.  This can cause serious"
14522                                         + " problems due to overlapping packages.\n")
14523                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14524
14525                 return retval
14526
14527 def multiple_actions(action1, action2):
14528         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14529         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14530         sys.exit(1)
14531
14532 def insert_optional_args(args):
14533         """
14534         Parse optional arguments and insert a value if one has
14535         not been provided. This is done before feeding the args
14536         to the optparse parser since that parser does not support
14537         this feature natively.
14538         """
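        # Illustrative transformations performed by the code below:
        #   ["-j", "foo"]      -> ["--jobs", "True", "foo"]
        #   ["-j4"]            -> ["--jobs", "4"]
        #   ["--jobs", "3"]    -> ["--jobs", "3"]
        #   ["-aj"]            -> ["--jobs", "True", "-a"]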
14539
14540         new_args = []
14541         jobs_opts = ("-j", "--jobs")
14542         arg_stack = args[:]
14543         arg_stack.reverse()
14544         while arg_stack:
14545                 arg = arg_stack.pop()
14546
14547                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14548                 if not (short_job_opt or arg in jobs_opts):
14549                         new_args.append(arg)
14550                         continue
14551
14552                 # Insert an empty placeholder in order to
14553                 # satisfy the requirements of optparse.
14554
14555                 new_args.append("--jobs")
14556                 job_count = None
14557                 saved_opts = None
14558                 if short_job_opt and len(arg) > 2:
14559                         if arg[:2] == "-j":
14560                                 try:
14561                                         job_count = int(arg[2:])
14562                                 except ValueError:
14563                                         saved_opts = arg[2:]
14564                         else:
14565                                 job_count = "True"
14566                                 saved_opts = arg[1:].replace("j", "")
14567
14568                 if job_count is None and arg_stack:
14569                         try:
14570                                 job_count = int(arg_stack[-1])
14571                         except ValueError:
14572                                 pass
14573                         else:
14574                                 # Discard the job count from the stack
14575                                 # since we're consuming it here.
14576                                 arg_stack.pop()
14577
14578                 if job_count is None:
14579                         # unlimited number of jobs
14580                         new_args.append("True")
14581                 else:
14582                         new_args.append(str(job_count))
14583
14584                 if saved_opts is not None:
14585                         new_args.append("-" + saved_opts)
14586
14587         return new_args
14588
14589 def parse_opts(tmpcmdline, silent=False):
14590         myaction=None
14591         myopts = {}
14592         myfiles=[]
14593
14594         global actions, options, shortmapping
14595
14596         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14597         argument_options = {
14598                 "--config-root": {
14599                         "help":"specify the location for portage configuration files",
14600                         "action":"store"
14601                 },
14602                 "--color": {
14603                         "help":"enable or disable color output",
14604                         "type":"choice",
14605                         "choices":("y", "n")
14606                 },
14607
14608                 "--jobs": {
14609
14610                         "help"   : "Specifies the number of packages to build " + \
14611                                 "simultaneously.",
14612
14613                         "action" : "store"
14614                 },
14615
14616                 "--load-average": {
14617
14618                         "help"   :"Specifies that no new builds should be started " + \
14619                                 "if there are other builds running and the load average " + \
14620                                 "is at least LOAD (a floating-point number).",
14621
14622                         "action" : "store"
14623                 },
14624
14625                 "--with-bdeps": {
14626                         "help":"include unnecessary build time dependencies",
14627                         "type":"choice",
14628                         "choices":("y", "n")
14629                 },
14630                 "--reinstall": {
14631                         "help":"specify conditions to trigger package reinstallation",
14632                         "type":"choice",
14633                         "choices":["changed-use"]
14634                 }
14635         }
14636
14637         from optparse import OptionParser
14638         parser = OptionParser()
14639         if parser.has_option("--help"):
14640                 parser.remove_option("--help")
14641
14642         for action_opt in actions:
14643                 parser.add_option("--" + action_opt, action="store_true",
14644                         dest=action_opt.replace("-", "_"), default=False)
14645         for myopt in options:
14646                 parser.add_option(myopt, action="store_true",
14647                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14648         for shortopt, longopt in shortmapping.iteritems():
14649                 parser.add_option("-" + shortopt, action="store_true",
14650                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14651         for myalias, myopt in longopt_aliases.iteritems():
14652                 parser.add_option(myalias, action="store_true",
14653                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14654
14655         for myopt, kwargs in argument_options.iteritems():
14656                 parser.add_option(myopt,
14657                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14658
14659         tmpcmdline = insert_optional_args(tmpcmdline)
14660
14661         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14662
14663         if myoptions.jobs:
14664                 jobs = None
14665                 if myoptions.jobs == "True":
14666                         jobs = True
14667                 else:
14668                         try:
14669                                 jobs = int(myoptions.jobs)
14670                         except ValueError:
14671                                 jobs = -1
14672
14673                 if jobs is not True and \
14674                         jobs < 1:
14675                         jobs = None
14676                         if not silent:
14677                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14678                                         (myoptions.jobs,), noiselevel=-1)
14679
14680                 myoptions.jobs = jobs
14681
14682         if myoptions.load_average:
14683                 try:
14684                         load_average = float(myoptions.load_average)
14685                 except ValueError:
14686                         load_average = 0.0
14687
14688                 if load_average <= 0.0:
14689                         load_average = None
14690                         if not silent:
14691                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14692                                         (myoptions.load_average,), noiselevel=-1)
14693
14694                 myoptions.load_average = load_average
14695
14696         for myopt in options:
14697                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14698                 if v:
14699                         myopts[myopt] = True
14700
14701         for myopt in argument_options:
14702                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14703                 if v is not None:
14704                         myopts[myopt] = v
14705
14706         for action_opt in actions:
14707                 v = getattr(myoptions, action_opt.replace("-", "_"))
14708                 if v:
14709                         if myaction:
14710                                 multiple_actions(myaction, action_opt)
14711                                 sys.exit(1)
14712                         myaction = action_opt
14713
14714         myfiles += myargs
14715
14716         return myaction, myopts, myfiles
14717
14718 def validate_ebuild_environment(trees):
14719         for myroot in trees:
14720                 settings = trees[myroot]["vartree"].settings
14721                 settings.validate()
14722
14723 def clear_caches(trees):
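        # Release memory before the merge phase: melt() undoes the earlier
        # dbapi.freeze() memoization, the aux caches are dropped, and
        # gc.collect() reclaims whatever the discarded depgraph still holds.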
14724         for d in trees.itervalues():
14725                 d["porttree"].dbapi.melt()
14726                 d["porttree"].dbapi._aux_cache.clear()
14727                 d["bintree"].dbapi._aux_cache.clear()
14728                 d["bintree"].dbapi._clear_cache()
14729                 d["vartree"].dbapi.linkmap._clear_cache()
14730         portage.dircache.clear()
14731         gc.collect()
14732
14733 def load_emerge_config(trees=None):
14734         kwargs = {}
14735         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14736                 v = os.environ.get(envvar, None)
14737                 if v and v.strip():
14738                         kwargs[k] = v
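        # e.g. running `ROOT=/mnt/gentoo emerge ...` (illustrative) leaves
        # kwargs == {"target_root": "/mnt/gentoo"} at this point.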
14739         trees = portage.create_trees(trees=trees, **kwargs)
14740
14741         for root, root_trees in trees.iteritems():
14742                 settings = root_trees["vartree"].settings
14743                 setconfig = load_default_config(settings, root_trees)
14744                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14745
14746         settings = trees["/"]["vartree"].settings
14747
14748         for myroot in trees:
14749                 if myroot != "/":
14750                         settings = trees[myroot]["vartree"].settings
14751                         break
14752
14753         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14754         mtimedb = portage.MtimeDB(mtimedbfile)
14755         
14756         return settings, trees, mtimedb
14757
14758 def adjust_config(myopts, settings):
14759         """Make emerge specific adjustments to the config."""
14760
14761         # To enhance usability, make some vars case insensitive by forcing them to
14762         # lower case.
14763         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14764                 if myvar in settings:
14765                         settings[myvar] = settings[myvar].lower()
14766                         settings.backup_changes(myvar)
14767         del myvar
14768
14769         # Kill noauto as it will break merges otherwise.
14770         if "noauto" in settings.features:
14771                 while "noauto" in settings.features:
14772                         settings.features.remove("noauto")
14773                 settings["FEATURES"] = " ".join(settings.features)
14774                 settings.backup_changes("FEATURES")
14775
14776         CLEAN_DELAY = 5
14777         try:
14778                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14779         except ValueError, e:
14780                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14781                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14782                         settings["CLEAN_DELAY"], noiselevel=-1)
14783         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14784         settings.backup_changes("CLEAN_DELAY")
14785
14786         EMERGE_WARNING_DELAY = 10
14787         try:
14788                 EMERGE_WARNING_DELAY = int(settings.get(
14789                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14790         except ValueError, e:
14791                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14792                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14793                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14794         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14795         settings.backup_changes("EMERGE_WARNING_DELAY")
14796
14797         if "--quiet" in myopts:
14798                 settings["PORTAGE_QUIET"]="1"
14799                 settings.backup_changes("PORTAGE_QUIET")
14800
14801         if "--verbose" in myopts:
14802                 settings["PORTAGE_VERBOSE"] = "1"
14803                 settings.backup_changes("PORTAGE_VERBOSE")
14804
14805         # Set so that configs will be merged regardless of remembered status
14806         if ("--noconfmem" in myopts):
14807                 settings["NOCONFMEM"]="1"
14808                 settings.backup_changes("NOCONFMEM")
14809
14810         # Set various debug markers... They should be merged somehow.
14811         PORTAGE_DEBUG = 0
14812         try:
14813                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14814                 if PORTAGE_DEBUG not in (0, 1):
14815                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14816                                 PORTAGE_DEBUG, noiselevel=-1)
14817                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14818                                 noiselevel=-1)
14819                         PORTAGE_DEBUG = 0
14820         except ValueError, e:
14821                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14822                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14823                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14824                 del e
14825         if "--debug" in myopts:
14826                 PORTAGE_DEBUG = 1
14827         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14828         settings.backup_changes("PORTAGE_DEBUG")
14829
14830         if settings.get("NOCOLOR") not in ("yes","true"):
14831                 portage.output.havecolor = 1
14832
14833         """The explicit --color < y | n > option overrides the NOCOLOR environment
14834         variable and stdout auto-detection."""
14835         if "--color" in myopts:
14836                 if "y" == myopts["--color"]:
14837                         portage.output.havecolor = 1
14838                         settings["NOCOLOR"] = "false"
14839                 else:
14840                         portage.output.havecolor = 0
14841                         settings["NOCOLOR"] = "true"
14842                 settings.backup_changes("NOCOLOR")
14843         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14844                 portage.output.havecolor = 0
14845                 settings["NOCOLOR"] = "true"
14846                 settings.backup_changes("NOCOLOR")
14847
14848 def apply_priorities(settings):
14849         ionice(settings)
14850         nice(settings)
14851
14852 def nice(settings):
14853         try:
14854                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14855         except (OSError, ValueError), e:
14856                 out = portage.output.EOutput()
14857                 out.eerror("Failed to change nice value to '%s'" % \
14858                         settings["PORTAGE_NICENESS"])
14859                 out.eerror("%s\n" % str(e))
14860
14861 def ionice(settings):
14862
14863         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14864         if ionice_cmd:
14865                 ionice_cmd = shlex.split(ionice_cmd)
14866         if not ionice_cmd:
14867                 return
14868
14869         from portage.util import varexpand
14870         variables = {"PID" : str(os.getpid())}
14871         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
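        # For example (illustrative make.conf value), the setting
        #   PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"
        # expands here to ["ionice", "-c", "3", "-p", "<pid of this emerge>"].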
14872
14873         try:
14874                 rval = portage.process.spawn(cmd, env=os.environ)
14875         except portage.exception.CommandNotFound:
14876                 # The OS kernel probably doesn't support ionice,
14877                 # so return silently.
14878                 return
14879
14880         if rval != os.EX_OK:
14881                 out = portage.output.EOutput()
14882                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14883                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14884
14885 def display_missing_pkg_set(root_config, set_name):
14886
14887         msg = []
14888         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14889                 "The following sets exist:") % \
14890                 colorize("INFORM", set_name))
14891         msg.append("")
14892
14893         for s in sorted(root_config.sets):
14894                 msg.append("    %s" % s)
14895         msg.append("")
14896
14897         writemsg_level("".join("%s\n" % l for l in msg),
14898                 level=logging.ERROR, noiselevel=-1)
14899
14900 def expand_set_arguments(myfiles, myaction, root_config):
14901         retval = os.EX_OK
14902         setconfig = root_config.setconfig
14903
14904         sets = setconfig.getSets()
14905
14906         # In order to know exactly which atoms/sets should be added to the
14907         # world file, the depgraph performs set expansion later. It will get
14908         # confused about where the atoms came from if it's not allowed to
14909         # expand them itself.
14910         do_not_expand = (None, )
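        # Since do_not_expand == (None,), only the default (install) action
        # leaves set tokens such as "@world" unexpanded here; actions like
        # "unmerge" or "depclean" get their sets expanded into atoms below.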
14911         newargs = []
14912         for a in myfiles:
14913                 if a in ("system", "world"):
14914                         newargs.append(SETPREFIX+a)
14915                 else:
14916                         newargs.append(a)
14917         myfiles = newargs
14918         del newargs
14919         newargs = []
14920
14921         # separators for set arguments
14922         ARG_START = "{"
14923         ARG_END = "}"
14924
14925         # WARNING: all operators must be of equal length
14926         IS_OPERATOR = "/@"
14927         DIFF_OPERATOR = "-@"
14928         UNION_OPERATOR = "+@"
14929         
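        # Illustrative (hypothetical set name): an argument such as
        #   @someset{days=7,force}
        # is rewritten below to "@someset" after passing the options
        #   {"days": "7", "force": "True"}
        # to setconfig.update() for that set (assuming SETPREFIX is "@").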
14930         for i in range(0, len(myfiles)):
14931                 if myfiles[i].startswith(SETPREFIX):
14932                         start = 0
14933                         end = 0
14934                         x = myfiles[i][len(SETPREFIX):]
14935                         newset = ""
14936                         while x:
14937                                 start = x.find(ARG_START)
14938                                 end = x.find(ARG_END)
14939                                 if start > 0 and start < end:
14940                                         namepart = x[:start]
14941                                         argpart = x[start+1:end]
14942                                 
14943                                         # TODO: implement proper quoting
14944                                         args = argpart.split(",")
14945                                         options = {}
14946                                         for a in args:
14947                                                 if "=" in a:
14948                                                         k, v  = a.split("=", 1)
14949                                                         options[k] = v
14950                                                 else:
14951                                                         options[a] = "True"
14952                                         setconfig.update(namepart, options)
14953                                         newset += (x[:start-len(namepart)]+namepart)
14954                                         x = x[end+len(ARG_END):]
14955                                 else:
14956                                         newset += x
14957                                         x = ""
14958                         myfiles[i] = SETPREFIX+newset
14959                                 
14960         sets = setconfig.getSets()
14961
14962         # display errors that occurred while loading the SetConfig instance
14963         for e in setconfig.errors:
14964                 print colorize("BAD", "Error during set creation: %s" % e)
14965         
14966         # emerge relies on the existence of sets with names "world" and "system"
14967         required_sets = ("world", "system")
14968         missing_sets = []
14969
14970         for s in required_sets:
14971                 if s not in sets:
14972                         missing_sets.append(s)
14973         if missing_sets:
14974                 if len(missing_sets) > 2:
14975                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14976                         missing_sets_str += ', and "%s"' % missing_sets[-1]
14977                 elif len(missing_sets) == 2:
14978                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14979                 else:
14980                         missing_sets_str = '"%s"' % missing_sets[-1]
14981                 msg = ["emerge: incomplete set configuration, " + \
14982                         "missing set(s): %s" % missing_sets_str]
14983                 if sets:
14984                         msg.append("        sets defined: %s" % ", ".join(sets))
14985                 msg.append("        This usually means that '%s'" % \
14986                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14987                 msg.append("        is missing or corrupt.")
14988                 for line in msg:
14989                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14990                 return (None, 1)
14991         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
14992
14993         for a in myfiles:
14994                 if a.startswith(SETPREFIX):
14995                         # support simple set operations (intersection, difference and union)
14996                         # on the commandline. Expressions are evaluated strictly left-to-right
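                        # For example (assuming SETPREFIX is "@" and the named sets exist):
                        #   @world-@system    -> atoms in "world" but not in "system"
                        #   @set1+@set2       -> union of both sets
                        #   @set1/@set2       -> intersection of both sets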
14997                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14998                                 expression = a[len(SETPREFIX):]
14999                                 expr_sets = []
15000                                 expr_ops = []
15001                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15002                                         is_pos = expression.rfind(IS_OPERATOR)
15003                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15004                                         union_pos = expression.rfind(UNION_OPERATOR)
15005                                         op_pos = max(is_pos, diff_pos, union_pos)
15006                                         s1 = expression[:op_pos]
15007                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15008                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15009                                         if s2 not in sets:
15010                                                 display_missing_pkg_set(root_config, s2)
15011                                                 return (None, 1)
15012                                         expr_sets.insert(0, s2)
15013                                         expr_ops.insert(0, op)
15014                                         expression = s1
15015                                 if expression not in sets:
15016                                         display_missing_pkg_set(root_config, expression)
15017                                         return (None, 1)
15018                                 expr_sets.insert(0, expression)
15019                                 result = set(setconfig.getSetAtoms(expression))
15020                                 for i in range(0, len(expr_ops)):
15021                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15022                                         if expr_ops[i] == IS_OPERATOR:
15023                                                 result.intersection_update(s2)
15024                                         elif expr_ops[i] == DIFF_OPERATOR:
15025                                                 result.difference_update(s2)
15026                                         elif expr_ops[i] == UNION_OPERATOR:
15027                                                 result.update(s2)
15028                                         else:
15029                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15030                                 newargs.extend(result)
15031                         else:                   
15032                                 s = a[len(SETPREFIX):]
15033                                 if s not in sets:
15034                                         display_missing_pkg_set(root_config, s)
15035                                         return (None, 1)
15036                                 setconfig.active.append(s)
15037                                 try:
15038                                         set_atoms = setconfig.getSetAtoms(s)
15039                                 except portage.exception.PackageSetNotFound, e:
15040                                         writemsg_level(("emerge: the given set '%s' " + \
15041                                                 "contains a non-existent set named '%s'.\n") % \
15042                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15043                                         return (None, 1)
15044                                 if myaction in unmerge_actions and \
15045                                                 not sets[s].supportsOperation("unmerge"):
15046                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15047                                                 "not support unmerge operations\n")
15048                                         retval = 1
15049                                 elif not set_atoms:
15050                                         print "emerge: '%s' is an empty set" % s
15051                                 elif myaction not in do_not_expand:
15052                                         newargs.extend(set_atoms)
15053                                 else:
15054                                         newargs.append(SETPREFIX+s)
15055                                 for e in sets[s].errors:
15056                                         print e
15057                 else:
15058                         newargs.append(a)
15059         return (newargs, retval)
15060
15061 def repo_name_check(trees):
15062         missing_repo_names = set()
15063         for root, root_trees in trees.iteritems():
15064                 if "porttree" in root_trees:
15065                         portdb = root_trees["porttree"].dbapi
15066                         missing_repo_names.update(portdb.porttrees)
15067                         repos = portdb.getRepositories()
15068                         for r in repos:
15069                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15070                         if portdb.porttree_root in missing_repo_names and \
15071                                 not os.path.exists(os.path.join(
15072                                 portdb.porttree_root, "profiles")):
15073                                 # This is normal if $PORTDIR happens to be empty,
15074                                 # so don't warn about it.
15075                                 missing_repo_names.remove(portdb.porttree_root)
15076
15077         if missing_repo_names:
15078                 msg = []
15079                 msg.append("WARNING: One or more repositories " + \
15080                         "have missing repo_name entries:")
15081                 msg.append("")
15082                 for p in missing_repo_names:
15083                         msg.append("\t%s/profiles/repo_name" % (p,))
15084                 msg.append("")
15085                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15086                         "should be a plain text file containing a unique " + \
15087                         "name for the repository on the first line.", 70))
15088                 writemsg_level("".join("%s\n" % l for l in msg),
15089                         level=logging.WARNING, noiselevel=-1)
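                # A typical fix (illustrative) is to put the repository's name on
                # the first line of that file, for example:
                #   echo my-overlay > /path/to/overlay/profiles/repo_name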
15090
15091         return bool(missing_repo_names)
15092
15093 def config_protect_check(trees):
15094         for root, root_trees in trees.iteritems():
15095                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15096                         msg = "!!! CONFIG_PROTECT is empty"
15097                         if root != "/":
15098                                 msg += " for '%s'" % root
15099                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
15100
15101 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15102
15103         if "--quiet" in myopts:
15104                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15105                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15106                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15107                         print "    " + colorize("INFORM", cp)
15108                 return
15109
15110         s = search(root_config, spinner, "--searchdesc" in myopts,
15111                 "--quiet" not in myopts, "--usepkg" in myopts,
15112                 "--usepkgonly" in myopts)
15113         null_cp = portage.dep_getkey(insert_category_into_atom(
15114                 arg, "null"))
15115         cat, atom_pn = portage.catsplit(null_cp)
15116         s.searchkey = atom_pn
15117         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15118                 s.addCP(cp)
15119         s.output()
15120         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15121         print "!!! one of the above fully-qualified ebuild names instead.\n"
15122
15123 def profile_check(trees, myaction, myopts):
15124         if myaction in ("info", "sync"):
15125                 return os.EX_OK
15126         elif "--version" in myopts or "--help" in myopts:
15127                 return os.EX_OK
15128         for root, root_trees in trees.iteritems():
15129                 if root_trees["root_config"].settings.profiles:
15130                         continue
15131                 # generate some profile related warning messages
15132                 validate_ebuild_environment(trees)
15133                 msg = "If you have just changed your profile configuration, you " + \
15134                         "should revert back to the previous configuration. Due to " + \
15135                         "your current profile being invalid, allowed actions are " + \
15136                         "limited to --help, --info, --sync, and --version."
15137                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15138                         level=logging.ERROR, noiselevel=-1)
15139                 return 1
15140         return os.EX_OK
15141
15142 def emerge_main():
15143         global portage  # NFC why this is necessary now - genone
15144         portage._disable_legacy_globals()
15145         # Disable color until we're sure that it should be enabled (after
15146         # EMERGE_DEFAULT_OPTS has been parsed).
15147         portage.output.havecolor = 0
15148         # This first pass is just for options that need to be known as early as
15149         # possible, such as --config-root.  They will be parsed again later,
15150         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15151         # value of --config-root).
15152         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15153         if "--debug" in myopts:
15154                 os.environ["PORTAGE_DEBUG"] = "1"
15155         if "--config-root" in myopts:
15156                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15157
15158         # Portage needs to ensure a sane umask for the files it creates.
15159         os.umask(022)
15160         settings, trees, mtimedb = load_emerge_config()
15161         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15162         rval = profile_check(trees, myaction, myopts)
15163         if rval != os.EX_OK:
15164                 return rval
15165
15166         if portage._global_updates(trees, mtimedb["updates"]):
15167                 mtimedb.commit()
15168                 # Reload the whole config from scratch.
15169                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15170                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15171
15172         xterm_titles = "notitles" not in settings.features
15173
15174         tmpcmdline = []
15175         if "--ignore-default-opts" not in myopts:
15176                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15177         tmpcmdline.extend(sys.argv[1:])
15178         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15179
15180         if "--digest" in myopts:
15181                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15182                 # Reload the whole config from scratch so that the portdbapi internal
15183                 # config is updated with new FEATURES.
15184                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15185                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15186
15187         for myroot in trees:
15188                 mysettings =  trees[myroot]["vartree"].settings
15189                 mysettings.unlock()
15190                 adjust_config(myopts, mysettings)
15191                 if "--pretend" not in myopts:
15192                         mysettings["PORTAGE_COUNTER_HASH"] = \
15193                                 trees[myroot]["vartree"].dbapi._counter_hash()
15194                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15195                 mysettings.lock()
15196                 del myroot, mysettings
15197
15198         apply_priorities(settings)
15199
15200         spinner = stdout_spinner()
15201         if "candy" in settings.features:
15202                 spinner.update = spinner.update_scroll
15203
15204         if "--quiet" not in myopts:
15205                 portage.deprecated_profile_check(settings=settings)
15206                 repo_name_check(trees)
15207                 config_protect_check(trees)
15208
15209         eclasses_overridden = {}
15210         for mytrees in trees.itervalues():
15211                 mydb = mytrees["porttree"].dbapi
15212                 # Freeze the portdbapi for performance (memoize all xmatch results).
15213                 mydb.freeze()
15214                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15215         del mytrees, mydb
15216
15217         if eclasses_overridden and \
15218                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15219                 prefix = bad(" * ")
15220                 if len(eclasses_overridden) == 1:
15221                         writemsg(prefix + "Overlay eclass overrides " + \
15222                                 "eclass from PORTDIR:\n", noiselevel=-1)
15223                 else:
15224                         writemsg(prefix + "Overlay eclasses override " + \
15225                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15226                 writemsg(prefix + "\n", noiselevel=-1)
15227                 for eclass_name in sorted(eclasses_overridden):
15228                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15229                                 (eclasses_overridden[eclass_name], eclass_name),
15230                                 noiselevel=-1)
15231                 writemsg(prefix + "\n", noiselevel=-1)
15232                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15233                 "because it will trigger invalidation of cached ebuild metadata " + \
15234                 "that is distributed with the portage tree. If you must " + \
15235                 "override eclasses from PORTDIR then you are advised to add " + \
15236                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15237                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15238                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15239                 "you would like to disable this warning."
15240                 from textwrap import wrap
15241                 for line in wrap(msg, 72):
15242                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15243
15244         if "moo" in myfiles:
15245                 print """
15246
15247   Larry loves Gentoo (""" + platform.system() + """)
15248
15249  _______________________
15250 < Have you mooed today? >
15251  -----------------------
15252         \   ^__^
15253          \  (oo)\_______
15254             (__)\       )\/\ 
15255                 ||----w |
15256                 ||     ||
15257
15258 """
15259
15260         for x in myfiles:
15261                 ext = os.path.splitext(x)[1]
15262                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15263                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15264                         break
15265
15266         root_config = trees[settings["ROOT"]]["root_config"]
15267         if myaction == "list-sets":
15268                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15269                 sys.stdout.flush()
15270                 return os.EX_OK
15271
15272         # only expand sets for actions taking package arguments
15273         oldargs = myfiles[:]
15274         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15275                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15276                 if retval != os.EX_OK:
15277                         return retval
15278
15279                 # Need to handle empty sets specially, otherwise emerge will react 
15280                 # with the help message for empty argument lists
15281                 if oldargs and not myfiles:
15282                         print "emerge: no targets left after set expansion"
15283                         return 0
15284
15285         if ("--tree" in myopts) and ("--columns" in myopts):
15286                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15287                 return 1
15288
15289         if ("--quiet" in myopts):
15290                 spinner.update = spinner.update_quiet
15291                 portage.util.noiselimit = -1
15292
15293         # Always create packages if FEATURES=buildpkg
15294         # Imply --buildpkg if --buildpkgonly
15295         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15296                 if "--buildpkg" not in myopts:
15297                         myopts["--buildpkg"] = True
15298
15299         # Also allow -S to invoke search action (-sS)
15300         if ("--searchdesc" in myopts):
15301                 if myaction and myaction != "search":
15302                         myfiles.append(myaction)
15303                 if "--search" not in myopts:
15304                         myopts["--search"] = True
15305                 myaction = "search"
15306
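              # A rough summary of the option implications applied below (the
              # code that follows is authoritative):
              #   FEATURES=getbinpkg -> --getbinpkg
              #   --buildpkgonly     -> drops --getbinpkg*/--usepkg*
              #   --fetch-all-uri    -> --fetchonly
              #   --skipfirst        -> --resume
              #   --getbinpkgonly    -> --usepkgonly and --getbinpkg
              #   --getbinpkg        -> --usepkg
              #   --usepkgonly       -> --usepkg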
15307         # Always try to fetch binary packages if FEATURES=getbinpkg
15308         if ("getbinpkg" in settings.features):
15309                 myopts["--getbinpkg"] = True
15310
15311         if "--buildpkgonly" in myopts:
15312                 # --buildpkgonly will not merge anything, so
15313                 # it cancels all binary package options.
15314                 for opt in ("--getbinpkg", "--getbinpkgonly",
15315                         "--usepkg", "--usepkgonly"):
15316                         myopts.pop(opt, None)
15317
15318         if "--fetch-all-uri" in myopts:
15319                 myopts["--fetchonly"] = True
15320
15321         if "--skipfirst" in myopts and "--resume" not in myopts:
15322                 myopts["--resume"] = True
15323
15324         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15325                 myopts["--usepkgonly"] = True
15326
15327         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15328                 myopts["--getbinpkg"] = True
15329
15330         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15331                 myopts["--usepkg"] = True
15332
15333         # Allow -K (--usepkgonly) alone to imply --usepkg/-k
15334         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15335                 myopts["--usepkg"] = True
15336
15337         # Allow -p to remove --ask
15338         if ("--pretend" in myopts) and ("--ask" in myopts):
15339                 print ">>> --pretend disables --ask... removing --ask from options."
15340                 del myopts["--ask"]
15341
15342         # forbid --ask when not in a terminal
15343         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15344         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15345                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15346                         noiselevel=-1)
15347                 return 1
15348
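              # PORTAGE_DEBUG=1 quiets the spinner and enables portage's debug
              # output; with FEATURES=python-trace it also turns on Python call
              # tracing via portage.debug.set_trace().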
15349         if settings.get("PORTAGE_DEBUG", "") == "1":
15350                 spinner.update = spinner.update_quiet
15351                 portage.debug=1
15352                 if "python-trace" in settings.features:
15353                         import portage.debug
15354                         portage.debug.set_trace(True)
15355
15356         if not ("--quiet" in myopts):
15357                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15358                         spinner.update = spinner.update_basic
15359
15360         if "--version" in myopts:
15361                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15362                         settings.profile_path, settings["CHOST"],
15363                         trees[settings["ROOT"]]["vartree"].dbapi)
15364                 return 0
15365         elif "--help" in myopts:
15366                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15367                 return 0
15368
15369         if "--debug" in myopts:
15370                 print "myaction", myaction
15371                 print "myopts", myopts
15372
15373         if not myaction and not myfiles and "--resume" not in myopts:
15374                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15375                 return 1
15376
15377         pretend = "--pretend" in myopts
15378         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15379         buildpkgonly = "--buildpkgonly" in myopts
15380
15381         # Check that the current user has the privileges required by the requested action.
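              # (portage.secpass, defined in portage.data, encodes the privilege
              #  level: 0 = neither root nor portage group, 1 = portage group
              #  member, 2 = root.)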
15382         if portage.secpass < 2:
15383                 # We've already allowed "--version" and "--help" above.
15384                 if "--pretend" not in myopts and myaction not in ("search","info"):
15385                         need_superuser = not \
15386                                 (fetchonly or \
15387                                 (buildpkgonly and secpass >= 1) or \
15388                                 myaction in ("metadata", "regen") or \
15389                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15390                         if portage.secpass < 1 or \
15391                                 need_superuser:
15392                                 if need_superuser:
15393                                         access_desc = "superuser"
15394                                 else:
15395                                         access_desc = "portage group"
15396                                 # Always show portage_group_warning() when only portage group
15397                                 # access is required but the user is not in the portage group.
15398                                 from portage.data import portage_group_warning
15399                                 if "--ask" in myopts:
15400                                         myopts["--pretend"] = True
15401                                         del myopts["--ask"]
15402                                         print ("%s access is required... " + \
15403                                                 "adding --pretend to options.\n") % access_desc
15404                                         if portage.secpass < 1 and not need_superuser:
15405                                                 portage_group_warning()
15406                                 else:
15407                                         sys.stderr.write(("emerge: %s access is " + \
15408                                                 "required.\n\n") % access_desc)
15409                                         if portage.secpass < 1 and not need_superuser:
15410                                                 portage_group_warning()
15411                                         return 1
15412
15413         disable_emergelog = False
15414         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15415                 if x in myopts:
15416                         disable_emergelog = True
15417                         break
15418         if myaction in ("search", "info"):
15419                 disable_emergelog = True
15420         if disable_emergelog:
15421                 """ Disable emergelog for everything except build or unmerge
15422                 operations.  This helps minimize parallel emerge.log entries that can
15423                 confuse log parsers.  We especially want it disabled during
15424                 parallel-fetch, which uses --resume --fetchonly."""
15425                 global emergelog
15426                 def emergelog(*pargs, **kargs):
15427                         pass
15428
15429         if not "--pretend" in myopts:
15430                 emergelog(xterm_titles, "Started emerge on: "+\
15431                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15432                 myelogstr=""
15433                 if myopts:
15434                         myelogstr=" ".join(myopts)
15435                 if myaction:
15436                         myelogstr+=" "+myaction
15437                 if myfiles:
15438                         myelogstr += " " + " ".join(oldargs)
15439                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15440         del oldargs
15441
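              # On SIGINT or SIGTERM: ignore further signals, report which signal
              # was received, and exit with status 100 + signum (e.g. 102 for
              # SIGINT, 115 for SIGTERM on Linux).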
15442         def emergeexitsig(signum, frame):
15443                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15444                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15445                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15446                 sys.exit(100+signum)
15447         signal.signal(signal.SIGINT, emergeexitsig)
15448         signal.signal(signal.SIGTERM, emergeexitsig)
15449
15450         def emergeexit():
15451                 """This gets our final log message in before we quit."""
15452                 if "--pretend" not in myopts:
15453                         emergelog(xterm_titles, " *** terminating.")
15454                 if "notitles" not in settings.features:
15455                         xtermTitleReset()
15456         portage.atexit_register(emergeexit)
15457
15458         if myaction in ("config", "metadata", "regen", "sync"):
15459                 if "--pretend" in myopts:
15460                         sys.stderr.write(("emerge: The '%s' action does " + \
15461                                 "not support '--pretend'.\n") % myaction)
15462                         return 1
15463
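              # Dispatch on the requested action: sync, metadata, regen, config,
              # search, clean/unmerge (and prune with --nodeps), then
              # depclean/info/prune, and finally the default build/update path
              # in the else branch below.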
15464         if "sync" == myaction:
15465                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15466         elif "metadata" == myaction:
15467                 action_metadata(settings, portdb, myopts)
15468         elif myaction=="regen":
15469                 validate_ebuild_environment(trees)
15470                 return action_regen(settings, portdb, myopts.get("--jobs"),
15471                         myopts.get("--load-average"))
15472         # CONFIG action
15473         elif "config"==myaction:
15474                 validate_ebuild_environment(trees)
15475                 action_config(settings, trees, myopts, myfiles)
15476
15477         # SEARCH action
15478         elif "search"==myaction:
15479                 validate_ebuild_environment(trees)
15480                 action_search(trees[settings["ROOT"]]["root_config"],
15481                         myopts, myfiles, spinner)
15482         elif myaction in ("clean", "unmerge") or \
15483                 (myaction == "prune" and "--nodeps" in myopts):
15484                 validate_ebuild_environment(trees)
15485
15486                 # Ensure atoms are valid before calling unmerge().
15487                 # For backward compat, leading '=' is not required.
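                      # (e.g. "sys-apps/portage-2.2" is accepted here as well as
                      #  "=sys-apps/portage-2.2")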
15488                 for x in myfiles:
15489                         if is_valid_package_atom(x) or \
15490                                 is_valid_package_atom("=" + x):
15491                                 continue
15492                         msg = []
15493                         msg.append("'%s' is not a valid package atom." % (x,))
15494                         msg.append("Please check ebuild(5) for full details.")
15495                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15496                                 level=logging.ERROR, noiselevel=-1)
15497                         return 1
15498
15499                 # When given a list of atoms, unmerge
15500                 # them in the order given.
15501                 ordered = myaction == "unmerge"
15502                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15503                         mtimedb["ldpath"], ordered=ordered):
15504                         if not (buildpkgonly or fetchonly or pretend):
15505                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15506
15507         elif myaction in ("depclean", "info", "prune"):
15508
15509                 # Ensure atoms are valid before passing them to depclean/info/prune.
15510                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15511                 valid_atoms = []
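                      # portage.dep_expand() qualifies each short name against the
                      # installed-package database (e.g. "bash" -> "app-shells/bash")
                      # and raises AmbiguousPackageName when more than one category
                      # matches; that case is reported below.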
15512                 for x in myfiles:
15513                         if is_valid_package_atom(x):
15514                                 try:
15515                                         valid_atoms.append(
15516                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15517                                 except portage.exception.AmbiguousPackageName, e:
15518                                         msg = "The short ebuild name \"" + x + \
15519                                                 "\" is ambiguous.  Please specify " + \
15520                                                 "one of the following " + \
15521                                                 "fully-qualified ebuild names instead:"
15522                                         for line in textwrap.wrap(msg, 70):
15523                                                 writemsg_level("!!! %s\n" % (line,),
15524                                                         level=logging.ERROR, noiselevel=-1)
15525                                         for i in e[0]:
15526                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15527                                                         level=logging.ERROR, noiselevel=-1)
15528                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15529                                         return 1
15530                                 continue
15531                         msg = []
15532                         msg.append("'%s' is not a valid package atom." % (x,))
15533                         msg.append("Please check ebuild(5) for full details.")
15534                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15535                                 level=logging.ERROR, noiselevel=-1)
15536                         return 1
15537
15538                 if myaction == "info":
15539                         return action_info(settings, trees, myopts, valid_atoms)
15540
15541                 validate_ebuild_environment(trees)
15542                 action_depclean(settings, trees, mtimedb["ldpath"],
15543                         myopts, myaction, valid_atoms, spinner)
15544                 if not (buildpkgonly or fetchonly or pretend):
15545                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15546         # "update", "system", or just process files:
15547         else:
15548                 validate_ebuild_environment(trees)
15549                 if "--pretend" not in myopts:
15550                         display_news_notification(root_config, myopts)
15551                 retval = action_build(settings, trees, mtimedb,
15552                         myopts, myaction, myfiles, spinner)
15553                 root_config = trees[settings["ROOT"]]["root_config"]
15554                 post_emerge(root_config, myopts, mtimedb, retval)
15555
15556                 return retval